	Rewrite markdown rendering to blackfriday v2 and rewrite orgmode rendering to go-org (#8560)
* Rewrite markdown rendering to blackfriday v2.0
* Fix style
* Fix go mod with golang 1.13
* Fix blackfriday v2 import
* Initial orgmode renderer migration to go-org
* Vendor go-org dependency
* Ignore errors :/
* Update go-org to latest version
* Update test
* Fix go-org test
* Remove unneeded code
* Fix comments
* Fix markdown test
* Fix blackfriday regression rendering HTML block
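For context, here is a minimal sketch (not part of the commit itself) of the blackfriday v1 to v2 API change this commit applies throughout the markdown code paths. The calls mirror those visible in the diffs below; the sample input is illustrative only.

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("A list:\n\n- [x] done\n- [ ] todo\n")

	// Before this commit (blackfriday v1):
	//   renderer := blackfriday.HtmlRenderer(htmlFlags, "", "")
	//   out := blackfriday.Markdown(input, renderer, extensions)
	//
	// After this commit (blackfriday v2): the HTML renderer is built from
	// HTMLRendererParameters and options are passed functionally to Run.
	renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Flags: blackfriday.CommonHTMLFlags,
	})
	out := blackfriday.Run(input,
		blackfriday.WithRenderer(renderer),
		blackfriday.WithExtensions(blackfriday.CommonExtensions))
	fmt.Println(string(out))
}
```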
go.mod (9 changed lines)
							| @@ -22,7 +22,6 @@ require ( | ||||
| 	github.com/blevesearch/go-porterstemmer v0.0.0-20141230013033-23a2c8e5cf1f // indirect | ||||
| 	github.com/blevesearch/segment v0.0.0-20160105220820-db70c57796cc // indirect | ||||
| 	github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26 // indirect | ||||
| 	github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f | ||||
| 	github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe // indirect | ||||
| 	github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect | ||||
| 	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect | ||||
| @@ -73,6 +72,7 @@ require ( | ||||
| 	github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae // indirect | ||||
| 	github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc | ||||
| 	github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 | ||||
| 	github.com/niklasfasching/go-org v0.1.7 | ||||
| 	github.com/oliamb/cutter v0.2.2 | ||||
| 	github.com/philhofer/fwd v1.0.0 // indirect | ||||
| 	github.com/pkg/errors v0.8.1 | ||||
| @@ -80,12 +80,13 @@ require ( | ||||
| 	github.com/prometheus/client_golang v1.1.0 | ||||
| 	github.com/prometheus/procfs v0.0.4 // indirect | ||||
| 	github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect | ||||
| 	github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff | ||||
| 	github.com/russross/blackfriday v2.0.0+incompatible // indirect | ||||
| 	github.com/russross/blackfriday/v2 v2.0.1 | ||||
| 	github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca // indirect | ||||
| 	github.com/satori/go.uuid v1.2.0 | ||||
| 	github.com/sergi/go-diff v1.0.0 | ||||
| 	github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect | ||||
| 	github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc // indirect | ||||
| 	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect | ||||
| 	github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd | ||||
| 	github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect | ||||
| 	github.com/stretchr/testify v1.4.0 | ||||
| @@ -100,7 +101,7 @@ require ( | ||||
| 	github.com/willf/bitset v0.0.0-20180426185212-8ce1146b8621 // indirect | ||||
| 	github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53 | ||||
| 	golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad | ||||
| 	golang.org/x/net v0.0.0-20190909003024-a7b16738d86b | ||||
| 	golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 | ||||
| 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 | ||||
| 	golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b | ||||
| 	golang.org/x/text v0.3.2 | ||||
|   | ||||
							
								
								
									
go.sum (16 changed lines)
							| @@ -86,8 +86,6 @@ github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26/go.mod h1:paBWMc | ||||
| github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA= | ||||
| github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= | ||||
| github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= | ||||
| github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f h1:REH9VH5ubNR0skLaOxK7TRJeRbE2dDfvaouQo8FsRcA= | ||||
| github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f/go.mod h1:6QaC0vFoKWYDth94dHFNgRT2YkT5FHdQp/Yx15aAAi0= | ||||
| github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= | ||||
| github.com/corbym/gocrest v1.0.3 h1:gwEdq6RkTmq+09CTuM29DfKOCtZ7G7bcyxs3IZ6EVdU= | ||||
| github.com/corbym/gocrest v1.0.3/go.mod h1:maVFL5lbdS2PgfOQgGRWDYTeunSWQeiEgoNdTABShCs= | ||||
| @@ -425,6 +423,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW | ||||
| github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= | ||||
| github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 h1:BvoENQQU+fZ9uukda/RzCAL/191HHwJA5b13R6diVlY= | ||||
| github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= | ||||
| github.com/niklasfasching/go-org v0.1.6 h1:F521WcqRNl8OJumlgAnekZgERaTA2HpfOYYfVEKOeI8= | ||||
| github.com/niklasfasching/go-org v0.1.6/go.mod h1:AsLD6X7djzRIz4/RFZu8vwRL0VGjUvGZCCH1Nz0VdrU= | ||||
| github.com/niklasfasching/go-org v0.1.7 h1:t3V+3XnS/7BhKv/7SlMUa8FvAiq577/a1T3D7mLIRXE= | ||||
| github.com/niklasfasching/go-org v0.1.7/go.mod h1:AsLD6X7djzRIz4/RFZu8vwRL0VGjUvGZCCH1Nz0VdrU= | ||||
| github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= | ||||
| github.com/oliamb/cutter v0.2.2 h1:Lfwkya0HHNU1YLnGv2hTkzHfasrSMkgv4Dn+5rmlk3k= | ||||
| github.com/oliamb/cutter v0.2.2/go.mod h1:4BenG2/4GuRBDbVm/OPahDVqbrOemzpPiG5mi1iryBU= | ||||
| @@ -487,8 +489,10 @@ github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qq | ||||
| github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= | ||||
| github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= | ||||
| github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= | ||||
| github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff h1:g9ZlAHmkc/h5So+OjNCkZWh+FjuKEOOOoyRkqlGA8+c= | ||||
| github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= | ||||
| github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk= | ||||
| github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= | ||||
| github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= | ||||
| github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= | ||||
| github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI= | ||||
| github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU= | ||||
| github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= | ||||
| @@ -499,6 +503,8 @@ github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b h1:4kg1wyftSKxLtnP | ||||
| github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= | ||||
| github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc h1:3wIrJvFb3Pf6B/2mDBnN1G5IfUVev4X5apadQlWOczE= | ||||
| github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= | ||||
| github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= | ||||
| github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= | ||||
| github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0= | ||||
| github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= | ||||
| github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= | ||||
| @@ -650,6 +656,8 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k | ||||
| golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= | ||||
| golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 h1:N66aaryRB3Ax92gH0v3hp1QYZ3zWWCCUR/j8Ifh45Ss= | ||||
| golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/oauth2 v0.0.0-20180620175406-ef147856a6dd/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= | ||||
| golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= | ||||
| golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= | ||||
|   | ||||
| @@ -323,6 +323,6 @@ func TestRender_ShortLinks(t *testing.T) { | ||||
| 		`<p><a href="`+notencodedImgurlWiki+`" rel="nofollow"><img src="`+notencodedImgurlWiki+`"/></a></p>`) | ||||
| 	test( | ||||
| 		"<p><a href=\"https://example.org\">[[foobar]]</a></p>", | ||||
| 		`<p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p>`, | ||||
| 		`<p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p>`) | ||||
| 		`<p></p><p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p><p></p>`, | ||||
| 		`<p></p><p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p><p></p>`) | ||||
| } | ||||
|   | ||||
| @@ -7,13 +7,14 @@ package markdown | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io" | ||||
| 	"strings" | ||||
|  | ||||
| 	"code.gitea.io/gitea/modules/markup" | ||||
| 	"code.gitea.io/gitea/modules/setting" | ||||
| 	"code.gitea.io/gitea/modules/util" | ||||
|  | ||||
| 	"github.com/russross/blackfriday" | ||||
| 	"github.com/russross/blackfriday/v2" | ||||
| ) | ||||
|  | ||||
| // Renderer is an extended version of the underlying render object. | ||||
| @@ -25,134 +26,138 @@ type Renderer struct { | ||||
|  | ||||
| var byteMailto = []byte("mailto:") | ||||
|  | ||||
| // Link defines how formal links should be processed to produce corresponding HTML elements. | ||||
| func (r *Renderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { | ||||
| 	// special case: this is not a link, a hash link or a mailto:, so it's a | ||||
| 	// relative URL | ||||
| 	if len(link) > 0 && !markup.IsLink(link) && | ||||
| 		link[0] != '#' && !bytes.HasPrefix(link, byteMailto) { | ||||
| 		lnk := string(link) | ||||
| var htmlEscaper = [256][]byte{ | ||||
| 	'&': []byte("&"), | ||||
| 	'<': []byte("<"), | ||||
| 	'>': []byte(">"), | ||||
| 	'"': []byte("""), | ||||
| } | ||||
|  | ||||
| func escapeHTML(w io.Writer, s []byte) { | ||||
| 	var start, end int | ||||
| 	for end < len(s) { | ||||
| 		escSeq := htmlEscaper[s[end]] | ||||
| 		if escSeq != nil { | ||||
| 			_, _ = w.Write(s[start:end]) | ||||
| 			_, _ = w.Write(escSeq) | ||||
| 			start = end + 1 | ||||
| 		} | ||||
| 		end++ | ||||
| 	} | ||||
| 	if start < len(s) && end <= len(s) { | ||||
| 		_, _ = w.Write(s[start:end]) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // RenderNode is a default renderer of a single node of a syntax tree. For | ||||
| // block nodes it will be called twice: first time with entering=true, second | ||||
| // time with entering=false, so that it could know when it's working on an open | ||||
| // tag and when on close. It writes the result to w. | ||||
| // | ||||
| // The return value is a way to tell the calling walker to adjust its walk | ||||
| // pattern: e.g. it can terminate the traversal by returning Terminate. Or it | ||||
| // can ask the walker to skip a subtree of this node by returning SkipChildren. | ||||
| // The typical behavior is to return GoToNext, which asks for the usual | ||||
| // traversal to the next node. | ||||
| func (r *Renderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { | ||||
| 	switch node.Type { | ||||
| 	case blackfriday.Image: | ||||
| 		prefix := r.URLPrefix | ||||
| 		if r.IsWiki { | ||||
| 			lnk = util.URLJoin("wiki", lnk) | ||||
| 			prefix = util.URLJoin(prefix, "wiki", "raw") | ||||
| 		} | ||||
| 		mLink := util.URLJoin(r.URLPrefix, lnk) | ||||
| 		link = []byte(mLink) | ||||
| 	} | ||||
|  | ||||
| 	if len(content) > 10 && string(content[0:9]) == "<a href=\"" && bytes.Contains(content[9:], []byte("<img")) { | ||||
| 		// Image with link case: markdown `[![]()]()` | ||||
| 		// If the content is an image, then we change the original href around it | ||||
| 		// which points to itself to a new address "link" | ||||
| 		rightQuote := bytes.Index(content[9:], []byte("\"")) | ||||
| 		content = bytes.Replace(content, content[9:9+rightQuote], link, 1) | ||||
| 		out.Write(content) | ||||
| 	} else { | ||||
| 		r.Renderer.Link(out, link, title, content) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // List renders markdown bullet or digit lists to HTML | ||||
| func (r *Renderer) List(out *bytes.Buffer, text func() bool, flags int) { | ||||
| 	marker := out.Len() | ||||
| 	if out.Len() > 0 { | ||||
| 		out.WriteByte('\n') | ||||
| 	} | ||||
|  | ||||
| 	if flags&blackfriday.LIST_TYPE_DEFINITION != 0 { | ||||
| 		out.WriteString("<dl>") | ||||
| 	} else if flags&blackfriday.LIST_TYPE_ORDERED != 0 { | ||||
| 		out.WriteString("<ol class='ui list'>") | ||||
| 	} else { | ||||
| 		out.WriteString("<ul class='ui list'>") | ||||
| 	} | ||||
| 	if !text() { | ||||
| 		out.Truncate(marker) | ||||
| 		return | ||||
| 	} | ||||
| 	if flags&blackfriday.LIST_TYPE_DEFINITION != 0 { | ||||
| 		out.WriteString("</dl>\n") | ||||
| 	} else if flags&blackfriday.LIST_TYPE_ORDERED != 0 { | ||||
| 		out.WriteString("</ol>\n") | ||||
| 	} else { | ||||
| 		out.WriteString("</ul>\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // ListItem defines how list items should be processed to produce corresponding HTML elements. | ||||
| func (r *Renderer) ListItem(out *bytes.Buffer, text []byte, flags int) { | ||||
| 	// Detect procedures to draw checkboxes. | ||||
| 	prefix := "" | ||||
| 	if bytes.HasPrefix(text, []byte("<p>")) { | ||||
| 		prefix = "<p>" | ||||
| 	} | ||||
| 	switch { | ||||
| 	case bytes.HasPrefix(text, []byte(prefix+"[ ] ")): | ||||
| 		text = append([]byte(`<span class="ui fitted disabled checkbox"><input type="checkbox" disabled="disabled" /><label /></span>`), text[3+len(prefix):]...) | ||||
| 		if prefix != "" { | ||||
| 			text = bytes.Replace(text, []byte(prefix), []byte{}, 1) | ||||
| 		prefix = strings.Replace(prefix, "/src/", "/media/", 1) | ||||
| 		link := node.LinkData.Destination | ||||
| 		if len(link) > 0 && !markup.IsLink(link) { | ||||
| 			lnk := string(link) | ||||
| 			lnk = util.URLJoin(prefix, lnk) | ||||
| 			lnk = strings.Replace(lnk, " ", "+", -1) | ||||
| 			link = []byte(lnk) | ||||
| 		} | ||||
| 	case bytes.HasPrefix(text, []byte(prefix+"[x] ")): | ||||
| 		text = append([]byte(`<span class="ui checked fitted disabled checkbox"><input type="checkbox" checked="" disabled="disabled" /><label /></span>`), text[3+len(prefix):]...) | ||||
| 		if prefix != "" { | ||||
| 			text = bytes.Replace(text, []byte(prefix), []byte{}, 1) | ||||
| 		node.LinkData.Destination = link | ||||
| 		// Render link around image only if parent is not link already | ||||
| 		if node.Parent != nil && node.Parent.Type != blackfriday.Link { | ||||
| 			if entering { | ||||
| 				_, _ = w.Write([]byte(`<a href="`)) | ||||
| 				escapeHTML(w, link) | ||||
| 				_, _ = w.Write([]byte(`">`)) | ||||
| 				return r.Renderer.RenderNode(w, node, entering) | ||||
| 			} | ||||
| 			s := r.Renderer.RenderNode(w, node, entering) | ||||
| 			_, _ = w.Write([]byte(`</a>`)) | ||||
| 			return s | ||||
| 		} | ||||
| 		return r.Renderer.RenderNode(w, node, entering) | ||||
| 	case blackfriday.Link: | ||||
| 		// special case: this is not a link, a hash link or a mailto:, so it's a | ||||
| 		// relative URL | ||||
| 		link := node.LinkData.Destination | ||||
| 		if len(link) > 0 && !markup.IsLink(link) && | ||||
| 			link[0] != '#' && !bytes.HasPrefix(link, byteMailto) && | ||||
| 			node.LinkData.Footnote == nil { | ||||
| 			lnk := string(link) | ||||
| 			if r.IsWiki { | ||||
| 				lnk = util.URLJoin("wiki", lnk) | ||||
| 			} | ||||
| 			link = []byte(util.URLJoin(r.URLPrefix, lnk)) | ||||
| 		} | ||||
| 		node.LinkData.Destination = link | ||||
| 		return r.Renderer.RenderNode(w, node, entering) | ||||
| 	case blackfriday.Text: | ||||
| 		isListItem := false | ||||
| 		for n := node.Parent; n != nil; n = n.Parent { | ||||
| 			if n.Type == blackfriday.Item { | ||||
| 				isListItem = true | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 		if isListItem { | ||||
| 			text := node.Literal | ||||
| 			switch { | ||||
| 			case bytes.HasPrefix(text, []byte("[ ] ")): | ||||
| 				_, _ = w.Write([]byte(`<span class="ui fitted disabled checkbox"><input type="checkbox" disabled="disabled" /><label /></span>`)) | ||||
| 				text = text[3:] | ||||
| 			case bytes.HasPrefix(text, []byte("[x] ")): | ||||
| 				_, _ = w.Write([]byte(`<span class="ui checked fitted disabled checkbox"><input type="checkbox" checked="" disabled="disabled" /><label /></span>`)) | ||||
| 				text = text[3:] | ||||
| 			} | ||||
| 			node.Literal = text | ||||
| 		} | ||||
| 	} | ||||
| 	r.Renderer.ListItem(out, text, flags) | ||||
| } | ||||
|  | ||||
| // Image defines how images should be processed to produce corresponding HTML elements. | ||||
| func (r *Renderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { | ||||
| 	prefix := r.URLPrefix | ||||
| 	if r.IsWiki { | ||||
| 		prefix = util.URLJoin(prefix, "wiki", "raw") | ||||
| 	} | ||||
| 	prefix = strings.Replace(prefix, "/src/", "/media/", 1) | ||||
| 	if len(link) > 0 && !markup.IsLink(link) { | ||||
| 		lnk := string(link) | ||||
| 		lnk = util.URLJoin(prefix, lnk) | ||||
| 		lnk = strings.Replace(lnk, " ", "+", -1) | ||||
| 		link = []byte(lnk) | ||||
| 	} | ||||
|  | ||||
| 	// Put a link around it pointing to itself by default | ||||
| 	out.WriteString(`<a href="`) | ||||
| 	out.Write(link) | ||||
| 	out.WriteString(`">`) | ||||
| 	r.Renderer.Image(out, link, title, alt) | ||||
| 	out.WriteString("</a>") | ||||
| 	return r.Renderer.RenderNode(w, node, entering) | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	blackfridayExtensions = 0 | | ||||
| 		blackfriday.EXTENSION_NO_INTRA_EMPHASIS | | ||||
| 		blackfriday.EXTENSION_TABLES | | ||||
| 		blackfriday.EXTENSION_FENCED_CODE | | ||||
| 		blackfriday.EXTENSION_STRIKETHROUGH | | ||||
| 		blackfriday.EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK | | ||||
| 		blackfriday.EXTENSION_DEFINITION_LISTS | | ||||
| 		blackfriday.EXTENSION_FOOTNOTES | | ||||
| 		blackfriday.EXTENSION_HEADER_IDS | | ||||
| 		blackfriday.EXTENSION_AUTO_HEADER_IDS | ||||
| 		blackfriday.NoIntraEmphasis | | ||||
| 		blackfriday.Tables | | ||||
| 		blackfriday.FencedCode | | ||||
| 		blackfriday.Strikethrough | | ||||
| 		blackfriday.NoEmptyLineBeforeBlock | | ||||
| 		blackfriday.DefinitionLists | | ||||
| 		blackfriday.Footnotes | | ||||
| 		blackfriday.HeadingIDs | | ||||
| 		blackfriday.AutoHeadingIDs | ||||
| 	blackfridayHTMLFlags = 0 | | ||||
| 		blackfriday.HTML_SKIP_STYLE | | ||||
| 		blackfriday.HTML_OMIT_CONTENTS | | ||||
| 		blackfriday.HTML_USE_SMARTYPANTS | ||||
| 		blackfriday.Smartypants | ||||
| ) | ||||
|  | ||||
| // RenderRaw renders Markdown to HTML without handling special links. | ||||
| func RenderRaw(body []byte, urlPrefix string, wikiMarkdown bool) []byte { | ||||
| 	renderer := &Renderer{ | ||||
| 		Renderer:  blackfriday.HtmlRenderer(blackfridayHTMLFlags, "", ""), | ||||
| 		Renderer: blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{ | ||||
| 			Flags: blackfridayHTMLFlags, | ||||
| 		}), | ||||
| 		URLPrefix: urlPrefix, | ||||
| 		IsWiki:    wikiMarkdown, | ||||
| 	} | ||||
|  | ||||
| 	exts := blackfridayExtensions | ||||
| 	if setting.Markdown.EnableHardLineBreak { | ||||
| 		exts |= blackfriday.EXTENSION_HARD_LINE_BREAK | ||||
| 		exts |= blackfriday.HardLineBreak | ||||
| 	} | ||||
|  | ||||
| 	body = blackfriday.Markdown(body, renderer, exts) | ||||
| 	body = blackfriday.Run(body, blackfriday.WithRenderer(renderer), blackfriday.WithExtensions(exts)) | ||||
| 	return markup.SanitizeBytes(body) | ||||
| } | ||||
|  | ||||
|   | ||||
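A hedged, self-contained sketch of the RenderNode contract the rewritten Renderer above relies on: block-level nodes are visited twice (entering=true, then entering=false), so an overriding renderer can emit an opening tag, delegate to the embedded HTML renderer, and close the tag on the second visit. The wrapImage type is illustrative only and URL escaping is omitted for brevity; it is not the code from this commit.

```go
package main

import (
	"fmt"
	"io"

	"github.com/russross/blackfriday/v2"
)

// wrapImage wraps every image that is not already inside a link in an anchor
// pointing to itself, the same shape of override the Gitea renderer uses for
// blackfriday.Image nodes.
type wrapImage struct {
	*blackfriday.HTMLRenderer
}

func (r *wrapImage) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
	if node.Type == blackfriday.Image && node.Parent != nil && node.Parent.Type != blackfriday.Link {
		if entering {
			// First visit: open the wrapping anchor, then let the base
			// renderer emit the <img ...> opening.
			_, _ = w.Write([]byte(`<a href="` + string(node.LinkData.Destination) + `">`))
			return r.HTMLRenderer.RenderNode(w, node, entering)
		}
		// Second visit: let the base renderer close the image, then close the anchor.
		status := r.HTMLRenderer.RenderNode(w, node, entering)
		_, _ = w.Write([]byte(`</a>`))
		return status
	}
	return r.HTMLRenderer.RenderNode(w, node, entering)
}

func main() {
	r := &wrapImage{HTMLRenderer: blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{})}
	out := blackfriday.Run([]byte("![logo](logo.png)"), blackfriday.WithRenderer(r))
	fmt.Println(string(out))
}
```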
| @@ -166,13 +166,13 @@ func testAnswers(baseURLContent, baseURLImages string) []string { | ||||
| <h3 id="footnotes">Footnotes</h3> | ||||
|  | ||||
| <p>Here is a simple footnote,<sup id="fnref:1"><a href="#fn:1" rel="nofollow">1</a></sup> and here is a longer one.<sup id="fnref:bignote"><a href="#fn:bignote" rel="nofollow">2</a></sup></p> | ||||
|  | ||||
| <div> | ||||
|  | ||||
| <hr/> | ||||
|  | ||||
| <ol> | ||||
| <li id="fn:1">This is the first footnote. | ||||
| </li> | ||||
| <li id="fn:1">This is the first footnote.</li> | ||||
|  | ||||
| <li id="fn:bignote"><p>Here is one with multiple paragraphs and code.</p> | ||||
|  | ||||
| @@ -180,9 +180,9 @@ func testAnswers(baseURLContent, baseURLImages string) []string { | ||||
|  | ||||
| <p><code>{ my code }</code></p> | ||||
|  | ||||
| <p>Add as many paragraphs as you like.</p> | ||||
| </li> | ||||
| <p>Add as many paragraphs as you like.</p></li> | ||||
| </ol> | ||||
|  | ||||
| </div> | ||||
| `, | ||||
| 	} | ||||
|   | ||||
| @@ -6,43 +6,39 @@ package mdstripper | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/russross/blackfriday" | ||||
| 	"github.com/russross/blackfriday/v2" | ||||
| ) | ||||
|  | ||||
| // MarkdownStripper extends blackfriday.Renderer | ||||
| type MarkdownStripper struct { | ||||
| 	blackfriday.Renderer | ||||
| 	links     []string | ||||
| 	coallesce bool | ||||
| 	empty     bool | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	blackfridayExtensions = 0 | | ||||
| 		blackfriday.EXTENSION_NO_INTRA_EMPHASIS | | ||||
| 		blackfriday.EXTENSION_TABLES | | ||||
| 		blackfriday.EXTENSION_FENCED_CODE | | ||||
| 		blackfriday.EXTENSION_STRIKETHROUGH | | ||||
| 		blackfriday.EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK | | ||||
| 		blackfriday.EXTENSION_DEFINITION_LISTS | | ||||
| 		blackfriday.EXTENSION_FOOTNOTES | | ||||
| 		blackfriday.EXTENSION_HEADER_IDS | | ||||
| 		blackfriday.EXTENSION_AUTO_HEADER_IDS | | ||||
| 		blackfriday.NoIntraEmphasis | | ||||
| 		blackfriday.Tables | | ||||
| 		blackfriday.FencedCode | | ||||
| 		blackfriday.Strikethrough | | ||||
| 		blackfriday.NoEmptyLineBeforeBlock | | ||||
| 		blackfriday.DefinitionLists | | ||||
| 		blackfriday.Footnotes | | ||||
| 		blackfriday.HeadingIDs | | ||||
| 		blackfriday.AutoHeadingIDs | | ||||
| 		// Not included in modules/markup/markdown/markdown.go; | ||||
| 		// required here to process inline links | ||||
| 		blackfriday.EXTENSION_AUTOLINK | ||||
| 		blackfriday.Autolink | ||||
| ) | ||||
|  | ||||
| //revive:disable:var-naming Implementing the Rendering interface requires breaking some linting rules | ||||
|  | ||||
| // StripMarkdown parses markdown content by removing all markup and code blocks | ||||
| //	in order to extract links and other references | ||||
| func StripMarkdown(rawBytes []byte) (string, []string) { | ||||
| 	stripper := &MarkdownStripper{ | ||||
| 		links: make([]string, 0, 10), | ||||
| 	} | ||||
| 	body := blackfriday.Markdown(rawBytes, stripper, blackfridayExtensions) | ||||
| 	return string(body), stripper.GetLinks() | ||||
| 	buf, links := StripMarkdownBytes(rawBytes) | ||||
| 	return string(buf), links | ||||
| } | ||||
|  | ||||
| // StripMarkdownBytes parses markdown content by removing all markup and code blocks | ||||
| @@ -50,205 +46,67 @@ func StripMarkdown(rawBytes []byte) (string, []string) { | ||||
| func StripMarkdownBytes(rawBytes []byte) ([]byte, []string) { | ||||
| 	stripper := &MarkdownStripper{ | ||||
| 		links: make([]string, 0, 10), | ||||
| 		empty: true, | ||||
| 	} | ||||
| 	body := blackfriday.Markdown(rawBytes, stripper, blackfridayExtensions) | ||||
| 	return body, stripper.GetLinks() | ||||
|  | ||||
| 	parser := blackfriday.New(blackfriday.WithRenderer(stripper), blackfriday.WithExtensions(blackfridayExtensions)) | ||||
| 	ast := parser.Parse(rawBytes) | ||||
| 	var buf bytes.Buffer | ||||
| 	stripper.RenderHeader(&buf, ast) | ||||
| 	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { | ||||
| 		return stripper.RenderNode(&buf, node, entering) | ||||
| 	}) | ||||
| 	stripper.RenderFooter(&buf, ast) | ||||
| 	return buf.Bytes(), stripper.GetLinks() | ||||
| } | ||||
|  | ||||
| // block-level callbacks | ||||
|  | ||||
| // BlockCode dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) BlockCode(out *bytes.Buffer, text []byte, infoString string) { | ||||
| 	// Not rendered | ||||
| // RenderNode is the main rendering method. It will be called once for | ||||
| // every leaf node and twice for every non-leaf node (first with | ||||
| // entering=true, then with entering=false). The method should write its | ||||
| // rendition of the node to the supplied writer w. | ||||
| func (r *MarkdownStripper) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { | ||||
| 	if !entering { | ||||
| 		return blackfriday.GoToNext | ||||
| 	} | ||||
| 	switch node.Type { | ||||
| 	case blackfriday.Text: | ||||
| 		r.processString(w, node.Literal, node.Parent == nil) | ||||
| 		return blackfriday.GoToNext | ||||
| 	case blackfriday.Link: | ||||
| 		r.processLink(w, node.LinkData.Destination) | ||||
| 		r.coallesce = false | ||||
| 		return blackfriday.SkipChildren | ||||
| 	} | ||||
| 	r.coallesce = false | ||||
| 	return blackfriday.GoToNext | ||||
| } | ||||
|  | ||||
| // BlockQuote dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) BlockQuote(out *bytes.Buffer, text []byte) { | ||||
| 	// FIXME: perhaps it's better to leave out block quote for this? | ||||
| 	r.processString(out, text, false) | ||||
| // RenderHeader is a method that allows the renderer to produce some | ||||
| // content preceding the main body of the output document. | ||||
| func (r *MarkdownStripper) RenderHeader(w io.Writer, ast *blackfriday.Node) { | ||||
| } | ||||
|  | ||||
| // BlockHtml dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) BlockHtml(out *bytes.Buffer, text []byte) { //nolint | ||||
| 	// Not rendered | ||||
| 	r.coallesce = false | ||||
| // RenderFooter is a symmetric counterpart of RenderHeader. | ||||
| func (r *MarkdownStripper) RenderFooter(w io.Writer, ast *blackfriday.Node) { | ||||
| } | ||||
|  | ||||
| // Header dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) Header(out *bytes.Buffer, text func() bool, level int, id string) { | ||||
| 	text() | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // HRule dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) HRule(out *bytes.Buffer) { | ||||
| 	// Not rendered | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // List dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) List(out *bytes.Buffer, text func() bool, flags int) { | ||||
| 	text() | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // ListItem dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) ListItem(out *bytes.Buffer, text []byte, flags int) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // Paragraph dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) Paragraph(out *bytes.Buffer, text func() bool) { | ||||
| 	text() | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // Table dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { | ||||
| 	r.processString(out, header, false) | ||||
| 	r.processString(out, body, false) | ||||
| } | ||||
|  | ||||
| // TableRow dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) TableRow(out *bytes.Buffer, text []byte) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // TableHeaderCell dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) TableHeaderCell(out *bytes.Buffer, text []byte, flags int) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // TableCell dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) TableCell(out *bytes.Buffer, text []byte, flags int) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // Footnotes dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) Footnotes(out *bytes.Buffer, text func() bool) { | ||||
| 	text() | ||||
| } | ||||
|  | ||||
| // FootnoteItem dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // TitleBlock dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) TitleBlock(out *bytes.Buffer, text []byte) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // Span-level callbacks | ||||
|  | ||||
| // AutoLink dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) AutoLink(out *bytes.Buffer, link []byte, kind int) { | ||||
| 	r.processLink(out, link, []byte{}) | ||||
| } | ||||
|  | ||||
| // CodeSpan dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) CodeSpan(out *bytes.Buffer, text []byte) { | ||||
| 	// Not rendered | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // DoubleEmphasis dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) DoubleEmphasis(out *bytes.Buffer, text []byte) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // Emphasis dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) Emphasis(out *bytes.Buffer, text []byte) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // Image dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { | ||||
| 	// Not rendered | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // LineBreak dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) LineBreak(out *bytes.Buffer) { | ||||
| 	// Not rendered | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // Link dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { | ||||
| 	r.processLink(out, link, content) | ||||
| } | ||||
|  | ||||
| // RawHtmlTag dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) RawHtmlTag(out *bytes.Buffer, tag []byte) { //nolint | ||||
| 	// Not rendered | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // TripleEmphasis dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) TripleEmphasis(out *bytes.Buffer, text []byte) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // StrikeThrough dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) StrikeThrough(out *bytes.Buffer, text []byte) { | ||||
| 	r.processString(out, text, false) | ||||
| } | ||||
|  | ||||
| // FootnoteRef dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { | ||||
| 	// Not rendered | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // Low-level callbacks | ||||
|  | ||||
| // Entity dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) Entity(out *bytes.Buffer, entity []byte) { | ||||
| 	// FIXME: literal entities are not parsed; perhaps they should | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // NormalText dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) NormalText(out *bytes.Buffer, text []byte) { | ||||
| 	r.processString(out, text, true) | ||||
| } | ||||
|  | ||||
| // Header and footer | ||||
|  | ||||
| // DocumentHeader dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) DocumentHeader(out *bytes.Buffer) { | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // DocumentFooter dummy function to proceed with rendering | ||||
| func (r *MarkdownStripper) DocumentFooter(out *bytes.Buffer) { | ||||
| 	r.coallesce = false | ||||
| } | ||||
|  | ||||
| // GetFlags returns rendering flags | ||||
| func (r *MarkdownStripper) GetFlags() int { | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| //revive:enable:var-naming | ||||
|  | ||||
| func doubleSpace(out *bytes.Buffer) { | ||||
| 	if out.Len() > 0 { | ||||
| 		out.WriteByte('\n') | ||||
| func (r *MarkdownStripper) doubleSpace(w io.Writer) { | ||||
| 	if !r.empty { | ||||
| 		_, _ = w.Write([]byte{'\n'}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (r *MarkdownStripper) processString(out *bytes.Buffer, text []byte, coallesce bool) { | ||||
| func (r *MarkdownStripper) processString(w io.Writer, text []byte, coallesce bool) { | ||||
| 	// Always break-up words | ||||
| 	if !coallesce || !r.coallesce { | ||||
| 		doubleSpace(out) | ||||
| 		r.doubleSpace(w) | ||||
| 	} | ||||
| 	out.Write(text) | ||||
| 	_, _ = w.Write(text) | ||||
| 	r.coallesce = coallesce | ||||
| 	r.empty = false | ||||
| } | ||||
| func (r *MarkdownStripper) processLink(out *bytes.Buffer, link []byte, content []byte) { | ||||
|  | ||||
| func (r *MarkdownStripper) processLink(w io.Writer, link []byte) { | ||||
| 	// Links are processed out of band | ||||
| 	r.links = append(r.links, string(link)) | ||||
| 	r.coallesce = false | ||||
|   | ||||
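The stripper now parses once and walks the AST instead of implementing dozens of per-element callbacks. Below is a minimal sketch of that parse-then-walk flow using only the blackfriday v2 calls already present in this diff; the extractLinks helper and sample input are illustrative, not part of the commit.

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

// extractLinks collects link destinations from markdown using the same
// parse-then-walk pattern StripMarkdownBytes uses above.
func extractLinks(raw []byte) []string {
	var links []string
	parser := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := parser.Parse(raw)
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Link {
			links = append(links, string(node.LinkData.Destination))
			return blackfriday.SkipChildren
		}
		return blackfriday.GoToNext
	})
	return links
}

func main() {
	fmt.Println(extractLinks([]byte("See [docs](https://docs.gitea.io) and [home](https://gitea.io).")))
}
```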
| @@ -5,12 +5,16 @@ | ||||
| package markup | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"html" | ||||
| 	"strings" | ||||
|  | ||||
| 	"code.gitea.io/gitea/modules/log" | ||||
| 	"code.gitea.io/gitea/modules/markup" | ||||
| 	"code.gitea.io/gitea/modules/markup/markdown" | ||||
| 	"code.gitea.io/gitea/modules/util" | ||||
|  | ||||
| 	"github.com/chaseadamsio/goorgeous" | ||||
| 	"github.com/russross/blackfriday" | ||||
| 	"github.com/niklasfasching/go-org/org" | ||||
| ) | ||||
|  | ||||
| func init() { | ||||
| @@ -32,23 +36,23 @@ func (Parser) Extensions() []string { | ||||
| } | ||||
|  | ||||
| // Render renders orgmode rawbytes to HTML | ||||
| func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) (result []byte) { | ||||
| 	defer func() { | ||||
| 		if err := recover(); err != nil { | ||||
| 			log.Error("Panic in orgmode.Render: %v Just returning the rawBytes", err) | ||||
| 			result = rawBytes | ||||
| 		} | ||||
| 	}() | ||||
| 	htmlFlags := blackfriday.HTML_USE_XHTML | ||||
| 	htmlFlags |= blackfriday.HTML_SKIP_STYLE | ||||
| 	htmlFlags |= blackfriday.HTML_OMIT_CONTENTS | ||||
| 	renderer := &markdown.Renderer{ | ||||
| 		Renderer:  blackfriday.HtmlRenderer(htmlFlags, "", ""), | ||||
| 		URLPrefix: urlPrefix, | ||||
| 		IsWiki:    isWiki, | ||||
| func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte { | ||||
| 	htmlWriter := org.NewHTMLWriter() | ||||
|  | ||||
| 	renderer := &Renderer{ | ||||
| 		HTMLWriter: htmlWriter, | ||||
| 		URLPrefix:  urlPrefix, | ||||
| 		IsWiki:     isWiki, | ||||
| 	} | ||||
| 	result = goorgeous.Org(rawBytes, renderer) | ||||
| 	return | ||||
|  | ||||
| 	htmlWriter.ExtendingWriter = renderer | ||||
|  | ||||
| 	res, err := org.New().Silent().Parse(bytes.NewReader(rawBytes), "").Write(renderer) | ||||
| 	if err != nil { | ||||
| 		log.Error("Panic in orgmode.Render: %v Just returning the rawBytes", err) | ||||
| 		return rawBytes | ||||
| 	} | ||||
| 	return []byte(res) | ||||
| } | ||||
|  | ||||
| // RenderString renders orgmode string to HTML string | ||||
| @@ -56,7 +60,63 @@ func RenderString(rawContent string, urlPrefix string, metas map[string]string, | ||||
| 	return string(Render([]byte(rawContent), urlPrefix, metas, isWiki)) | ||||
| } | ||||
|  | ||||
| // Render implements markup.Parser | ||||
| // Render renders orgmode rawbytes to HTML | ||||
| func (Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte { | ||||
| 	return Render(rawBytes, urlPrefix, metas, isWiki) | ||||
| } | ||||
|  | ||||
| // Renderer implements org.Writer | ||||
| type Renderer struct { | ||||
| 	*org.HTMLWriter | ||||
| 	URLPrefix string | ||||
| 	IsWiki    bool | ||||
| } | ||||
|  | ||||
| var byteMailto = []byte("mailto:") | ||||
|  | ||||
| // WriteRegularLink renders images, links or videos | ||||
| func (r *Renderer) WriteRegularLink(l org.RegularLink) { | ||||
| 	link := []byte(html.EscapeString(l.URL)) | ||||
| 	if l.Protocol == "file" { | ||||
| 		link = link[len("file:"):] | ||||
| 	} | ||||
| 	if len(link) > 0 && !markup.IsLink(link) && | ||||
| 		link[0] != '#' && !bytes.HasPrefix(link, byteMailto) { | ||||
| 		lnk := string(link) | ||||
| 		if r.IsWiki { | ||||
| 			lnk = util.URLJoin("wiki", lnk) | ||||
| 		} | ||||
| 		link = []byte(util.URLJoin(r.URLPrefix, lnk)) | ||||
| 	} | ||||
|  | ||||
| 	description := string(link) | ||||
| 	if l.Description != nil { | ||||
| 		description = r.nodesAsString(l.Description...) | ||||
| 	} | ||||
| 	switch l.Kind() { | ||||
| 	case "image": | ||||
| 		r.WriteString(fmt.Sprintf(`<img src="%s" alt="%s" title="%s" />`, link, description, description)) | ||||
| 	case "video": | ||||
| 		r.WriteString(fmt.Sprintf(`<video src="%s" title="%s">%s</video>`, link, description, description)) | ||||
| 	default: | ||||
| 		r.WriteString(fmt.Sprintf(`<a href="%s" title="%s">%s</a>`, link, description, description)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (r *Renderer) emptyClone() *Renderer { | ||||
| 	wcopy := *(r.HTMLWriter) | ||||
| 	wcopy.Builder = strings.Builder{} | ||||
|  | ||||
| 	rcopy := *r | ||||
| 	rcopy.HTMLWriter = &wcopy | ||||
|  | ||||
| 	wcopy.ExtendingWriter = &rcopy | ||||
|  | ||||
| 	return &rcopy | ||||
| } | ||||
|  | ||||
| func (r *Renderer) nodesAsString(nodes ...org.Node) string { | ||||
| 	tmp := r.emptyClone() | ||||
| 	org.WriteNodes(tmp, nodes...) | ||||
| 	return tmp.String() | ||||
| } | ||||
|   | ||||
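For reference, a minimal sketch of the go-org flow that the new orgmode Render function above wraps: build an HTML writer, parse the Org source, and write it out, falling back to the raw input on error. Only calls already shown in this diff are used; the sample document is illustrative.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

func main() {
	src := "* Heading\nSome text with a [[https://gitea.io][link]].\n"

	writer := org.NewHTMLWriter()
	html, err := org.New().Silent().Parse(strings.NewReader(src), "").Write(writer)
	if err != nil {
		// Mirror the fallback in Render above: fall back to the raw input on error.
		fmt.Println(src)
		return
	}
	fmt.Println(html)
}
```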
| @@ -27,12 +27,12 @@ func TestRender_StandardLinks(t *testing.T) { | ||||
| 		assert.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(buffer)) | ||||
| 	} | ||||
|  | ||||
| 	googleRendered := `<p><a href="https://google.com/" title="https://google.com/">https://google.com/</a></p>` | ||||
| 	googleRendered := "<p>\n<a href=\"https://google.com/\" title=\"https://google.com/\">https://google.com/</a>\n</p>" | ||||
| 	test("[[https://google.com/]]", googleRendered) | ||||
|  | ||||
| 	lnk := util.URLJoin(AppSubURL, "WikiPage") | ||||
| 	test("[[WikiPage][WikiPage]]", | ||||
| 		`<p><a href="`+lnk+`" title="WikiPage">WikiPage</a></p>`) | ||||
| 		"<p>\n<a href=\""+lnk+"\" title=\"WikiPage\">WikiPage</a>\n</p>") | ||||
| } | ||||
|  | ||||
| func TestRender_Images(t *testing.T) { | ||||
| @@ -45,10 +45,8 @@ func TestRender_Images(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	url := "../../.images/src/02/train.jpg" | ||||
| 	title := "Train" | ||||
| 	result := util.URLJoin(AppSubURL, url) | ||||
|  | ||||
| 	test( | ||||
| 		"[[file:"+url+"]["+title+"]]", | ||||
| 		`<p><a href="`+result+`"><img src="`+result+`" alt="`+title+`" title="`+title+`" /></a></p>`) | ||||
| 	test("[[file:"+url+"]]", | ||||
| 		"<p>\n<img src=\""+result+"\" alt=\""+result+"\" title=\""+result+"\" />\n</p>") | ||||
| } | ||||
|   | ||||
							
								
								
									
vendor/github.com/chaseadamsio/goorgeous/.gitignore (deleted, 1 line, generated, vendored)
							| @@ -1 +0,0 @@ | ||||
| .DS_Store | ||||
							
								
								
									
vendor/github.com/chaseadamsio/goorgeous/.travis.yml (deleted, 12 lines, generated, vendored)
							| @@ -1,12 +0,0 @@ | ||||
| language: go | ||||
|  | ||||
| go: | ||||
|   - 1.7 | ||||
|  | ||||
| before_install: | ||||
|   - go get golang.org/x/tools/cmd/cover | ||||
|   - go get github.com/mattn/goveralls | ||||
|  | ||||
| script: | ||||
|   - go test -v -covermode=count -coverprofile=coverage.out | ||||
|   - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci | ||||
							
								
								
									
vendor/github.com/chaseadamsio/goorgeous/README.org (deleted, 66 lines, generated, vendored)
							| @@ -1,66 +0,0 @@ | ||||
| #+TITLE: chaseadamsio/goorgeous | ||||
|  | ||||
| [[https://travis-ci.org/chaseadamsio/goorgeous.svg?branch=master]] | ||||
| [[https://coveralls.io/repos/github/chaseadamsio/goorgeous/badge.svg?branch=master]] | ||||
|  | ||||
| /goorgeous is a Go Org to HTML Parser./ | ||||
|  | ||||
| [[file:gopher_small.gif]]  | ||||
|  | ||||
| *Pronounced: Go? Org? Yes!* | ||||
|  | ||||
| #+BEGIN_QUOTE | ||||
| "Org mode is for keeping notes, maintaining TODO lists, planning projects, and authoring documents with a fast and effective plain-text system." | ||||
|  | ||||
| - [[orgmode.org]] | ||||
| #+END_QUOTE | ||||
|  | ||||
| The purpose of this package is to come as close as possible as parsing an =*.org= document into HTML, the same way one might publish [[http://orgmode.org/worg/org-tutorials/org-publish-html-tutorial.html][with org-publish-html from Emacs]].  | ||||
|  | ||||
| * Installation | ||||
|  | ||||
| #+BEGIN_SRC sh | ||||
|   go get -u github.com/chaseadamsio/goorgeous | ||||
| #+END_SRC | ||||
|  | ||||
| * Usage | ||||
|  | ||||
| ** Org Headers | ||||
|  | ||||
| To retrieve the headers from a =[]byte=, call =OrgHeaders= and it will return a =map[string]interface{}=:  | ||||
|  | ||||
| #+BEGIN_SRC go | ||||
|   input := "#+title: goorgeous\n* Some Headline\n" | ||||
|   out := goorgeous.OrgHeaders(input)  | ||||
| #+END_SRC | ||||
|  | ||||
| #+BEGIN_SRC go | ||||
|   map[string]interface{}{  | ||||
|           "title": "goorgeous" | ||||
|   } | ||||
| #+END_SRC | ||||
|  | ||||
| ** Org Content | ||||
|  | ||||
| After importing =github.com/chaseadamsio/goorgeous=, you can call =Org= with a =[]byte= and it will return an =html= version of the content as a =[]byte= | ||||
|  | ||||
| #+BEGIN_SRC go | ||||
|   input := "#+TITLE: goorgeous\n* Some Headline\n" | ||||
|   out := goorgeous.Org(input)  | ||||
| #+END_SRC | ||||
|  | ||||
| =out= will be: | ||||
|  | ||||
| #+BEGIN_SRC html | ||||
|   <h1>Some Headline</h1>/n | ||||
| #+END_SRC | ||||
|  | ||||
| * Why?  | ||||
|  | ||||
| First off, I've become an unapologetic user of Emacs & ever since finding =org-mode= I use it for anything having to do with writing content, organizing my life and keeping documentation of my days/weeks/months. | ||||
|  | ||||
| Although I like Emacs & =emacs-lisp=, I publish all of my html sites with [[https://gohugo.io][Hugo Static Site Generator]] and wanted to be able to write my content in =org-mode= in Emacs rather than markdown. | ||||
|  | ||||
| Hugo's implementation of templating and speed are unmatched, so the only way I knew for sure I could continue to use Hugo and write in =org-mode= seamlessly was to write a golang parser for org content and submit a PR for Hugo to use it. | ||||
| * Acknowledgements | ||||
| I leaned heavily on russross' [[https://github.com/russross/blackfriday][blackfriday markdown renderer]] as both an example of how to write a parser (with some updates to leverage the go we know today) and reusing the blackfriday HTML Renderer so I didn't have to write my own! | ||||
							
								
								
									
vendor/github.com/chaseadamsio/goorgeous/goorgeous.go (deleted, 803 lines, generated, vendored)
							| @@ -1,803 +0,0 @@ | ||||
| package goorgeous | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"regexp" | ||||
|  | ||||
| 	"github.com/russross/blackfriday" | ||||
| 	"github.com/shurcooL/sanitized_anchor_name" | ||||
| ) | ||||
|  | ||||
| type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int | ||||
|  | ||||
| type footnotes struct { | ||||
| 	id  string | ||||
| 	def string | ||||
| } | ||||
|  | ||||
| type parser struct { | ||||
| 	r              blackfriday.Renderer | ||||
| 	inlineCallback [256]inlineParser | ||||
| 	notes          []footnotes | ||||
| } | ||||
|  | ||||
| // NewParser returns a new parser with the inlineCallbacks required for org content | ||||
| func NewParser(renderer blackfriday.Renderer) *parser { | ||||
| 	p := new(parser) | ||||
| 	p.r = renderer | ||||
|  | ||||
| 	p.inlineCallback['='] = generateVerbatim | ||||
| 	p.inlineCallback['~'] = generateCode | ||||
| 	p.inlineCallback['/'] = generateEmphasis | ||||
| 	p.inlineCallback['_'] = generateUnderline | ||||
| 	p.inlineCallback['*'] = generateBold | ||||
| 	p.inlineCallback['+'] = generateStrikethrough | ||||
| 	p.inlineCallback['['] = generateLinkOrImg | ||||
|  | ||||
| 	return p | ||||
| } | ||||
|  | ||||
| // OrgCommon is the easiest way to parse a byte slice of org content and makes assumptions | ||||
| // that the caller wants to use blackfriday's HTMLRenderer with XHTML | ||||
| func OrgCommon(input []byte) []byte { | ||||
| 	renderer := blackfriday.HtmlRenderer(blackfriday.HTML_USE_XHTML, "", "") | ||||
| 	return OrgOptions(input, renderer) | ||||
| } | ||||
|  | ||||
| // Org is a convenience name for OrgOptions | ||||
| func Org(input []byte, renderer blackfriday.Renderer) []byte { | ||||
| 	return OrgOptions(input, renderer) | ||||
| } | ||||
|  | ||||
| // OrgOptions takes an org content byte slice and a renderer to use | ||||
| func OrgOptions(input []byte, renderer blackfriday.Renderer) []byte { | ||||
| 	// in the case that we need to render something in isEmpty but there isn't a new line char | ||||
| 	input = append(input, '\n') | ||||
| 	var output bytes.Buffer | ||||
|  | ||||
| 	p := NewParser(renderer) | ||||
|  | ||||
| 	scanner := bufio.NewScanner(bytes.NewReader(input)) | ||||
| 	// used to capture code blocks | ||||
| 	marker := "" | ||||
| 	syntax := "" | ||||
| 	listType := "" | ||||
| 	inParagraph := false | ||||
| 	inList := false | ||||
| 	inTable := false | ||||
| 	inFixedWidthArea := false | ||||
| 	var tmpBlock bytes.Buffer | ||||
|  | ||||
| 	for scanner.Scan() { | ||||
| 		data := scanner.Bytes() | ||||
|  | ||||
| 		if !isEmpty(data) && isComment(data) || IsKeyword(data) { | ||||
| 			switch { | ||||
| 			case inList: | ||||
| 				if tmpBlock.Len() > 0 { | ||||
| 					p.generateList(&output, tmpBlock.Bytes(), listType) | ||||
| 				} | ||||
| 				inList = false | ||||
| 				listType = "" | ||||
| 				tmpBlock.Reset() | ||||
| 			case inTable: | ||||
| 				if tmpBlock.Len() > 0 { | ||||
| 					p.generateTable(&output, tmpBlock.Bytes()) | ||||
| 				} | ||||
| 				inTable = false | ||||
| 				tmpBlock.Reset() | ||||
| 			case inParagraph: | ||||
| 				if tmpBlock.Len() > 0 { | ||||
| 					p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1]) | ||||
| 				} | ||||
| 				inParagraph = false | ||||
| 				tmpBlock.Reset() | ||||
| 			case inFixedWidthArea: | ||||
| 				if tmpBlock.Len() > 0 { | ||||
| 					tmpBlock.WriteString("</pre>\n") | ||||
| 					output.Write(tmpBlock.Bytes()) | ||||
| 				} | ||||
| 				inFixedWidthArea = false | ||||
| 				tmpBlock.Reset() | ||||
| 			} | ||||
|  | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| 		case isEmpty(data): | ||||
| 			switch { | ||||
| 			case inList: | ||||
| 				if tmpBlock.Len() > 0 { | ||||
| 					p.generateList(&output, tmpBlock.Bytes(), listType) | ||||
| 				} | ||||
| 				inList = false | ||||
| 				listType = "" | ||||
| 				tmpBlock.Reset() | ||||
| 			case inTable: | ||||
| 				if tmpBlock.Len() > 0 { | ||||
| 					p.generateTable(&output, tmpBlock.Bytes()) | ||||
| 				} | ||||
| 				inTable = false | ||||
| 				tmpBlock.Reset() | ||||
| 			case inParagraph: | ||||
| 				if tmpBlock.Len() > 0 { | ||||
| 					p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1]) | ||||
| 				} | ||||
| 				inParagraph = false | ||||
| 				tmpBlock.Reset() | ||||
| 			case inFixedWidthArea: | ||||
| 				if tmpBlock.Len() > 0 { | ||||
| 					tmpBlock.WriteString("</pre>\n") | ||||
| 					output.Write(tmpBlock.Bytes()) | ||||
| 				} | ||||
| 				inFixedWidthArea = false | ||||
| 				tmpBlock.Reset() | ||||
| 			case marker != "": | ||||
| 				tmpBlock.WriteByte('\n') | ||||
| 			default: | ||||
| 				continue | ||||
| 			} | ||||
| 		case isPropertyDrawer(data) || marker == "PROPERTIES": | ||||
| 			if marker == "" { | ||||
| 				marker = "PROPERTIES" | ||||
| 			} | ||||
| 			if bytes.Equal(data, []byte(":END:")) { | ||||
| 				marker = "" | ||||
| 			} | ||||
| 			continue | ||||
| 		case isBlock(data) || marker != "": | ||||
| 			matches := reBlock.FindSubmatch(data) | ||||
| 			if len(matches) > 0 { | ||||
| 				if string(matches[1]) == "END" { | ||||
| 					switch marker { | ||||
| 					case "QUOTE": | ||||
| 						var tmpBuf bytes.Buffer | ||||
| 						p.inline(&tmpBuf, tmpBlock.Bytes()) | ||||
| 						p.r.BlockQuote(&output, tmpBuf.Bytes()) | ||||
| 					case "CENTER": | ||||
| 						var tmpBuf bytes.Buffer | ||||
| 						output.WriteString("<center>\n") | ||||
| 						p.inline(&tmpBuf, tmpBlock.Bytes()) | ||||
| 						output.Write(tmpBuf.Bytes()) | ||||
| 						output.WriteString("</center>\n") | ||||
| 					default: | ||||
| 						tmpBlock.WriteByte('\n') | ||||
| 						p.r.BlockCode(&output, tmpBlock.Bytes(), syntax) | ||||
| 					} | ||||
| 					marker = "" | ||||
| 					tmpBlock.Reset() | ||||
| 					continue | ||||
| 				} | ||||
|  | ||||
| 			} | ||||
| 			if marker != "" { | ||||
| 				if marker != "SRC" && marker != "EXAMPLE" { | ||||
| 					var tmpBuf bytes.Buffer | ||||
| 					tmpBuf.Write([]byte("<p>\n")) | ||||
| 					p.inline(&tmpBuf, data) | ||||
| 					tmpBuf.WriteByte('\n') | ||||
| 					tmpBuf.Write([]byte("</p>\n")) | ||||
| 					tmpBlock.Write(tmpBuf.Bytes()) | ||||
|  | ||||
| 				} else { | ||||
| 					tmpBlock.WriteByte('\n') | ||||
| 					tmpBlock.Write(data) | ||||
| 				} | ||||
|  | ||||
| 			} else { | ||||
| 				marker = string(matches[2]) | ||||
| 				syntax = string(matches[3]) | ||||
| 			} | ||||
| 		case isFootnoteDef(data): | ||||
| 			matches := reFootnoteDef.FindSubmatch(data) | ||||
| 			for i := range p.notes { | ||||
| 				if p.notes[i].id == string(matches[1]) { | ||||
| 					p.notes[i].def = string(matches[2]) | ||||
| 				} | ||||
| 			} | ||||
| 		case isTable(data): | ||||
| 			if inTable != true { | ||||
| 				inTable = true | ||||
| 			} | ||||
| 			tmpBlock.Write(data) | ||||
| 			tmpBlock.WriteByte('\n') | ||||
| 		case IsKeyword(data): | ||||
| 			continue | ||||
| 		case isComment(data): | ||||
| 			p.generateComment(&output, data) | ||||
| 		case isHeadline(data): | ||||
| 			p.generateHeadline(&output, data) | ||||
| 		case isDefinitionList(data): | ||||
| 			if inList != true { | ||||
| 				listType = "dl" | ||||
| 				inList = true | ||||
| 			} | ||||
| 			var work bytes.Buffer | ||||
| 			flags := blackfriday.LIST_TYPE_DEFINITION | ||||
| 			matches := reDefinitionList.FindSubmatch(data) | ||||
| 			flags |= blackfriday.LIST_TYPE_TERM | ||||
| 			p.inline(&work, matches[1]) | ||||
| 			p.r.ListItem(&tmpBlock, work.Bytes(), flags) | ||||
| 			work.Reset() | ||||
| 			flags &= ^blackfriday.LIST_TYPE_TERM | ||||
| 			p.inline(&work, matches[2]) | ||||
| 			p.r.ListItem(&tmpBlock, work.Bytes(), flags) | ||||
| 		case isUnorderedList(data): | ||||
| 			if inList != true { | ||||
| 				listType = "ul" | ||||
| 				inList = true | ||||
| 			} | ||||
| 			matches := reUnorderedList.FindSubmatch(data) | ||||
| 			var work bytes.Buffer | ||||
| 			p.inline(&work, matches[2]) | ||||
| 			p.r.ListItem(&tmpBlock, work.Bytes(), 0) | ||||
| 		case isOrderedList(data): | ||||
| 			if inList != true { | ||||
| 				listType = "ol" | ||||
| 				inList = true | ||||
| 			} | ||||
| 			matches := reOrderedList.FindSubmatch(data) | ||||
| 			var work bytes.Buffer | ||||
| 			tmpBlock.WriteString("<li") | ||||
| 			if len(matches[2]) > 0 { | ||||
| 				tmpBlock.WriteString(" value=\"") | ||||
| 				tmpBlock.Write(matches[2]) | ||||
| 				tmpBlock.WriteString("\"") | ||||
| 				matches[3] = matches[3][1:] | ||||
| 			} | ||||
| 			p.inline(&work, matches[3]) | ||||
| 			tmpBlock.WriteString(">") | ||||
| 			tmpBlock.Write(work.Bytes()) | ||||
| 			tmpBlock.WriteString("</li>\n") | ||||
| 		case isHorizontalRule(data): | ||||
| 			p.r.HRule(&output) | ||||
| 		case isExampleLine(data): | ||||
| 			if inParagraph == true { | ||||
| 				if len(tmpBlock.Bytes()) > 0 { | ||||
| 					p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1]) | ||||
| 					inParagraph = false | ||||
| 				} | ||||
| 				tmpBlock.Reset() | ||||
| 			} | ||||
| 			if inFixedWidthArea != true { | ||||
| 				tmpBlock.WriteString("<pre class=\"example\">\n") | ||||
| 				inFixedWidthArea = true | ||||
| 			} | ||||
| 			matches := reExampleLine.FindSubmatch(data) | ||||
| 			tmpBlock.Write(matches[1]) | ||||
| 			tmpBlock.WriteString("\n") | ||||
| 			break | ||||
| 		default: | ||||
| 			if inParagraph == false { | ||||
| 				inParagraph = true | ||||
| 				if inFixedWidthArea == true { | ||||
| 					if tmpBlock.Len() > 0 { | ||||
| 						tmpBlock.WriteString("</pre>") | ||||
| 						output.Write(tmpBlock.Bytes()) | ||||
| 					} | ||||
| 					inFixedWidthArea = false | ||||
| 					tmpBlock.Reset() | ||||
| 				} | ||||
| 			} | ||||
| 			tmpBlock.Write(data) | ||||
| 			tmpBlock.WriteByte('\n') | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if len(tmpBlock.Bytes()) > 0 { | ||||
| 		if inParagraph == true { | ||||
| 			p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1]) | ||||
| 		} else if inFixedWidthArea == true { | ||||
| 			tmpBlock.WriteString("</pre>\n") | ||||
| 			output.Write(tmpBlock.Bytes()) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Writing footnote def. list | ||||
| 	if len(p.notes) > 0 { | ||||
| 		flags := blackfriday.LIST_ITEM_BEGINNING_OF_LIST | ||||
| 		p.r.Footnotes(&output, func() bool { | ||||
| 			for i := range p.notes { | ||||
| 				p.r.FootnoteItem(&output, []byte(p.notes[i].id), []byte(p.notes[i].def), flags) | ||||
| 			} | ||||
| 			return true | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	return output.Bytes() | ||||
| } | ||||
|  | ||||
| // Org Syntax has been broken up into 4 distinct sections based on | ||||
| // the org-syntax draft (http://orgmode.org/worg/dev/org-syntax.html): | ||||
| // - Headlines | ||||
| // - Greater Elements | ||||
| // - Elements | ||||
| // - Objects | ||||
|  | ||||
| // Headlines | ||||
| func isHeadline(data []byte) bool { | ||||
| 	if !charMatches(data[0], '*') { | ||||
| 		return false | ||||
| 	} | ||||
| 	level := 0 | ||||
| 	for level < 6 && charMatches(data[level], '*') { | ||||
| 		level++ | ||||
| 	} | ||||
| 	return charMatches(data[level], ' ') | ||||
| } | ||||
|  | ||||
| func (p *parser) generateHeadline(out *bytes.Buffer, data []byte) { | ||||
| 	level := 1 | ||||
| 	status := "" | ||||
| 	priority := "" | ||||
|  | ||||
| 	for level < 6 && data[level] == '*' { | ||||
| 		level++ | ||||
| 	} | ||||
|  | ||||
| 	start := skipChar(data, level, ' ') | ||||
|  | ||||
| 	data = data[start:] | ||||
| 	i := 0 | ||||
|  | ||||
| 	// Check if the headline has a status so it can be rendered as a separate span that can be hidden or | ||||
| 	// modified with CSS classes | ||||
| 	if hasStatus(data[i:4]) { | ||||
| 		status = string(data[i:4]) | ||||
| 		i += 5 // one extra character for the next whitespace | ||||
| 	} | ||||
|  | ||||
| 	// Check if the next byte is a priority marker | ||||
| 	if data[i] == '[' && hasPriority(data[i+1]) { | ||||
| 		priority = string(data[i+1]) | ||||
| 		i += 4 // for "[c]" + ' ' | ||||
| 	} | ||||
|  | ||||
| 	tags, tagsFound := findTags(data, i) | ||||
|  | ||||
| 	headlineID := sanitized_anchor_name.Create(string(data[i:])) | ||||
|  | ||||
| 	generate := func() bool { | ||||
| 		dataEnd := len(data) | ||||
| 		if tagsFound > 0 { | ||||
| 			dataEnd = tagsFound | ||||
| 		} | ||||
|  | ||||
| 		headline := bytes.TrimRight(data[i:dataEnd], " \t") | ||||
|  | ||||
| 		if status != "" { | ||||
| 			out.WriteString("<span class=\"todo " + status + "\">" + status + "</span>") | ||||
| 			out.WriteByte(' ') | ||||
| 		} | ||||
|  | ||||
| 		if priority != "" { | ||||
| 			out.WriteString("<span class=\"priority " + priority + "\">[" + priority + "]</span>") | ||||
| 			out.WriteByte(' ') | ||||
| 		} | ||||
|  | ||||
| 		p.inline(out, headline) | ||||
|  | ||||
| 		if tagsFound > 0 { | ||||
| 			for _, tag := range tags { | ||||
| 				out.WriteByte(' ') | ||||
| 				out.WriteString("<span class=\"tags " + tag + "\">" + tag + "</span>") | ||||
| 				out.WriteByte(' ') | ||||
| 			} | ||||
| 		} | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	p.r.Header(out, generate, level, headlineID) | ||||
| } | ||||
|  | ||||
| func hasStatus(data []byte) bool { | ||||
| 	return bytes.Contains(data, []byte("TODO")) || bytes.Contains(data, []byte("DONE")) | ||||
| } | ||||
|  | ||||
| func hasPriority(char byte) bool { | ||||
| 	return (charMatches(char, 'A') || charMatches(char, 'B') || charMatches(char, 'C')) | ||||
| } | ||||
|  | ||||
| func findTags(data []byte, start int) ([]string, int) { | ||||
| 	tags := []string{} | ||||
| 	tagOpener := 0 | ||||
| 	tagMarker := tagOpener | ||||
| 	for tIdx := start; tIdx < len(data); tIdx++ { | ||||
| 		if tagMarker > 0 && data[tIdx] == ':' { | ||||
| 			tags = append(tags, string(data[tagMarker+1:tIdx])) | ||||
| 			tagMarker = tIdx | ||||
| 		} | ||||
| 		if data[tIdx] == ':' && tagOpener == 0 && data[tIdx-1] == ' ' { | ||||
| 			tagMarker = tIdx | ||||
| 			tagOpener = tIdx | ||||
| 		} | ||||
| 	} | ||||
| 	return tags, tagOpener | ||||
| } | ||||
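As a quick editorial illustration of the headline handling above: a headline like

    ** TODO [#A] Improve the parser :dev:parser:

is split by generateHeadline into level 2, status "TODO", priority "A", the inline-rendered title "Improve the parser", and the tags "dev" and "parser"; the status, the priority and each tag are wrapped in their own span (classes "todo", "priority" and "tags") so they can be styled or hidden with CSS.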
|  | ||||
| // Greater Elements | ||||
| // ~~ Definition Lists | ||||
| var reDefinitionList = regexp.MustCompile(`^\s*-\s+(.+?)\s+::\s+(.*)`) | ||||
|  | ||||
| func isDefinitionList(data []byte) bool { | ||||
| 	return reDefinitionList.Match(data) | ||||
| } | ||||
|  | ||||
| // ~~ Example lines | ||||
| var reExampleLine = regexp.MustCompile(`^\s*:\s(\s*.*)|^\s*:$`) | ||||
|  | ||||
| func isExampleLine(data []byte) bool { | ||||
| 	return reExampleLine.Match(data) | ||||
| } | ||||
|  | ||||
| // ~~ Ordered Lists | ||||
| var reOrderedList = regexp.MustCompile(`^(\s*)\d+\.\s+\[?@?(\d*)\]?(.+)`) | ||||
|  | ||||
| func isOrderedList(data []byte) bool { | ||||
| 	return reOrderedList.Match(data) | ||||
| } | ||||
|  | ||||
| // ~~ Unordered Lists | ||||
| var reUnorderedList = regexp.MustCompile(`^(\s*)[-\+]\s+(.+)`) | ||||
|  | ||||
| func isUnorderedList(data []byte) bool { | ||||
| 	return reUnorderedList.Match(data) | ||||
| } | ||||
|  | ||||
| // ~~ Tables | ||||
| var reTableHeaders = regexp.MustCompile(`^[|+-]*$`) | ||||
|  | ||||
| func isTable(data []byte) bool { | ||||
| 	return charMatches(data[0], '|') | ||||
| } | ||||
|  | ||||
| func (p *parser) generateTable(output *bytes.Buffer, data []byte) { | ||||
| 	var table bytes.Buffer | ||||
| 	rows := bytes.Split(bytes.Trim(data, "\n"), []byte("\n")) | ||||
| 	hasTableHeaders := len(rows) > 1 | ||||
| 	if len(rows) > 1 { | ||||
| 		hasTableHeaders = reTableHeaders.Match(rows[1]) | ||||
| 	} | ||||
| 	tbodySet := false | ||||
|  | ||||
| 	for idx, row := range rows { | ||||
| 		var rowBuff bytes.Buffer | ||||
| 		if hasTableHeaders && idx == 0 { | ||||
| 			table.WriteString("<thead>") | ||||
| 			for _, cell := range bytes.Split(row[1:len(row)-1], []byte("|")) { | ||||
| 				p.r.TableHeaderCell(&rowBuff, bytes.Trim(cell, " \t"), 0) | ||||
| 			} | ||||
| 			p.r.TableRow(&table, rowBuff.Bytes()) | ||||
| 			table.WriteString("</thead>\n") | ||||
| 		} else if hasTableHeaders && idx == 1 { | ||||
| 			continue | ||||
| 		} else { | ||||
| 			if !tbodySet { | ||||
| 				table.WriteString("<tbody>") | ||||
| 				tbodySet = true | ||||
| 			} | ||||
| 			if !reTableHeaders.Match(row) { | ||||
| 				for _, cell := range bytes.Split(row[1:len(row)-1], []byte("|")) { | ||||
| 					var cellBuff bytes.Buffer | ||||
| 					p.inline(&cellBuff, bytes.Trim(cell, " \t")) | ||||
| 					p.r.TableCell(&rowBuff, cellBuff.Bytes(), 0) | ||||
| 				} | ||||
| 				p.r.TableRow(&table, rowBuff.Bytes()) | ||||
| 			} | ||||
| 			if tbodySet && idx == len(rows)-1 { | ||||
| 				table.WriteString("</tbody>\n") | ||||
| 				tbodySet = false | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	output.WriteString("\n<table>\n") | ||||
| 	output.Write(table.Bytes()) | ||||
| 	output.WriteString("</table>\n") | ||||
| } | ||||
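An editorial example of the table handling above: the Org table

    | Name | Value |
    |------+-------|
    | foo  | 1     |

is rendered with the first row as header cells inside <thead>, the separator row skipped (it matches reTableHeaders), the remaining rows as inline-rendered cells inside <tbody>, and the whole result wrapped in a <table> element.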
|  | ||||
| // ~~ Property Drawers | ||||
|  | ||||
| func isPropertyDrawer(data []byte) bool { | ||||
| 	return bytes.Equal(data, []byte(":PROPERTIES:")) | ||||
| } | ||||
|  | ||||
| // ~~ Dynamic Blocks | ||||
| var reBlock = regexp.MustCompile(`^#\+(BEGIN|END)_(\w+)\s*([0-9A-Za-z_\-]*)?`) | ||||
|  | ||||
| func isBlock(data []byte) bool { | ||||
| 	return reBlock.Match(data) | ||||
| } | ||||
|  | ||||
| // ~~ Footnotes | ||||
| var reFootnoteDef = regexp.MustCompile(`^\[fn:([\w]+)\] +(.+)`) | ||||
|  | ||||
| func isFootnoteDef(data []byte) bool { | ||||
| 	return reFootnoteDef.Match(data) | ||||
| } | ||||
|  | ||||
| // Elements | ||||
| // ~~ Keywords | ||||
| func IsKeyword(data []byte) bool { | ||||
| 	return len(data) > 2 && charMatches(data[0], '#') && charMatches(data[1], '+') && !charMatches(data[2], ' ') | ||||
| } | ||||
|  | ||||
| // ~~ Comments | ||||
| func isComment(data []byte) bool { | ||||
| 	return charMatches(data[0], '#') && charMatches(data[1], ' ') | ||||
| } | ||||
|  | ||||
| func (p *parser) generateComment(out *bytes.Buffer, data []byte) { | ||||
| 	var work bytes.Buffer | ||||
| 	work.WriteString("<!-- ") | ||||
| 	work.Write(data[2:]) | ||||
| 	work.WriteString(" -->") | ||||
| 	work.WriteByte('\n') | ||||
| 	out.Write(work.Bytes()) | ||||
| } | ||||
|  | ||||
| // ~~ Horizontal Rules | ||||
| var reHorizontalRule = regexp.MustCompile(`^\s*?-----\s?$`) | ||||
|  | ||||
| func isHorizontalRule(data []byte) bool { | ||||
| 	return reHorizontalRule.Match(data) | ||||
| } | ||||
|  | ||||
| // ~~ Paragraphs | ||||
| func (p *parser) generateParagraph(out *bytes.Buffer, data []byte) { | ||||
| 	generate := func() bool { | ||||
| 		p.inline(out, bytes.Trim(data, " ")) | ||||
| 		return true | ||||
| 	} | ||||
| 	p.r.Paragraph(out, generate) | ||||
| } | ||||
|  | ||||
| func (p *parser) generateList(output *bytes.Buffer, data []byte, listType string) { | ||||
| 	generateList := func() bool { | ||||
| 		output.WriteByte('\n') | ||||
| 		p.inline(output, bytes.Trim(data, " ")) | ||||
| 		return true | ||||
| 	} | ||||
| 	switch listType { | ||||
| 	case "ul": | ||||
| 		p.r.List(output, generateList, 0) | ||||
| 	case "ol": | ||||
| 		p.r.List(output, generateList, blackfriday.LIST_TYPE_ORDERED) | ||||
| 	case "dl": | ||||
| 		p.r.List(output, generateList, blackfriday.LIST_TYPE_DEFINITION) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Objects | ||||
|  | ||||
| func (p *parser) inline(out *bytes.Buffer, data []byte) { | ||||
| 	i, end := 0, 0 | ||||
|  | ||||
| 	for i < len(data) { | ||||
| 		for end < len(data) && p.inlineCallback[data[end]] == nil { | ||||
| 			end++ | ||||
| 		} | ||||
|  | ||||
| 		p.r.Entity(out, data[i:end]) | ||||
|  | ||||
| 		if end >= len(data) { | ||||
| 			break | ||||
| 		} | ||||
| 		i = end | ||||
|  | ||||
| 		handler := p.inlineCallback[data[i]] | ||||
|  | ||||
| 		if consumed := handler(p, out, data, i); consumed > 0 { | ||||
| 			i += consumed | ||||
| 			end = i | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		end = i + 1 | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func isAcceptablePreOpeningChar(dataIn, data []byte, offset int) bool { | ||||
| 	if len(dataIn) == len(data) { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	char := dataIn[offset-1] | ||||
| 	return charMatches(char, ' ') || isPreChar(char) | ||||
| } | ||||
|  | ||||
| func isPreChar(char byte) bool { | ||||
| 	return charMatches(char, '>') || charMatches(char, '(') || charMatches(char, '{') || charMatches(char, '[') | ||||
| } | ||||
|  | ||||
| func isAcceptablePostClosingChar(char byte) bool { | ||||
| 	return charMatches(char, ' ') || isTerminatingChar(char) | ||||
| } | ||||
|  | ||||
| func isTerminatingChar(char byte) bool { | ||||
| 	return charMatches(char, '.') || charMatches(char, ',') || charMatches(char, '?') || charMatches(char, '!') || charMatches(char, ')') || charMatches(char, '}') || charMatches(char, ']') | ||||
| } | ||||
|  | ||||
| func findLastCharInInline(data []byte, char byte) int { | ||||
| 	timesFound := 0 | ||||
| 	last := 0 | ||||
| 	// Start from character after the inline indicator | ||||
| 	for i := 1; i < len(data); i++ { | ||||
| 		if timesFound == 1 { | ||||
| 			break | ||||
| 		} | ||||
| 		if data[i] == char { | ||||
| 			if len(data) == i+1 || (len(data) > i+1 && isAcceptablePostClosingChar(data[i+1])) { | ||||
| 				last = i | ||||
| 				timesFound += 1 | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return last | ||||
| } | ||||
|  | ||||
| func generator(p *parser, out *bytes.Buffer, dataIn []byte, offset int, char byte, doInline bool, renderer func(*bytes.Buffer, []byte)) int { | ||||
| 	data := dataIn[offset:] | ||||
| 	c := byte(char) | ||||
| 	start := 1 | ||||
| 	i := start | ||||
| 	if len(data) <= 1 { | ||||
| 		return 0 | ||||
| 	} | ||||
|  | ||||
| 	lastCharInside := findLastCharInInline(data, c) | ||||
|  | ||||
| 	// Org mode spec says a non-whitespace character must immediately follow. | ||||
| 	// If the current char is the marker itself, there is no text between the markers, so it is not a candidate | ||||
| 	if isSpace(data[i]) || lastCharInside == i || !isAcceptablePreOpeningChar(dataIn, data, offset) { | ||||
| 		return 0 | ||||
| 	} | ||||
|  | ||||
| 	if lastCharInside > 0 { | ||||
| 		var work bytes.Buffer | ||||
| 		if doInline { | ||||
| 			p.inline(&work, data[start:lastCharInside]) | ||||
| 			renderer(out, work.Bytes()) | ||||
| 		} else { | ||||
| 			renderer(out, data[start:lastCharInside]) | ||||
| 		} | ||||
| 		next := lastCharInside + 1 | ||||
| 		return next | ||||
| 	} | ||||
|  | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| // ~~ Text Markup | ||||
| func generateVerbatim(p *parser, out *bytes.Buffer, data []byte, offset int) int { | ||||
| 	return generator(p, out, data, offset, '=', false, p.r.CodeSpan) | ||||
| } | ||||
|  | ||||
| func generateCode(p *parser, out *bytes.Buffer, data []byte, offset int) int { | ||||
| 	return generator(p, out, data, offset, '~', false, p.r.CodeSpan) | ||||
| } | ||||
|  | ||||
| func generateEmphasis(p *parser, out *bytes.Buffer, data []byte, offset int) int { | ||||
| 	return generator(p, out, data, offset, '/', true, p.r.Emphasis) | ||||
| } | ||||
|  | ||||
| func generateUnderline(p *parser, out *bytes.Buffer, data []byte, offset int) int { | ||||
| 	underline := func(out *bytes.Buffer, text []byte) { | ||||
| 		out.WriteString("<span style=\"text-decoration: underline;\">") | ||||
| 		out.Write(text) | ||||
| 		out.WriteString("</span>") | ||||
| 	} | ||||
|  | ||||
| 	return generator(p, out, data, offset, '_', true, underline) | ||||
| } | ||||
|  | ||||
| func generateBold(p *parser, out *bytes.Buffer, data []byte, offset int) int { | ||||
| 	return generator(p, out, data, offset, '*', true, p.r.DoubleEmphasis) | ||||
| } | ||||
|  | ||||
| func generateStrikethrough(p *parser, out *bytes.Buffer, data []byte, offset int) int { | ||||
| 	return generator(p, out, data, offset, '+', true, p.r.StrikeThrough) | ||||
| } | ||||
|  | ||||
| // ~~ Images and Links (inc. Footnote) | ||||
| var reLinkOrImg = regexp.MustCompile(`\[\[(.+?)\]\[?(.*?)\]?\]`) | ||||
|  | ||||
| func generateLinkOrImg(p *parser, out *bytes.Buffer, data []byte, offset int) int { | ||||
| 	data = data[offset+1:] | ||||
| 	start := 1 | ||||
| 	i := start | ||||
| 	var hyperlink []byte | ||||
| 	isImage := false | ||||
| 	isFootnote := false | ||||
| 	closedLink := false | ||||
| 	hasContent := false | ||||
|  | ||||
| 	if bytes.Equal(data[0:3], []byte("fn:")) { | ||||
| 		isFootnote = true | ||||
| 	} else if data[0] != '[' { | ||||
| 		return 0 | ||||
| 	} | ||||
|  | ||||
| 	if bytes.Equal(data[1:6], []byte("file:")) { | ||||
| 		isImage = true | ||||
| 	} | ||||
|  | ||||
| 	for i < len(data) { | ||||
| 		currChar := data[i] | ||||
| 		switch { | ||||
| 		case charMatches(currChar, ']') && closedLink == false: | ||||
| 			if isImage { | ||||
| 				hyperlink = data[start+5 : i] | ||||
| 			} else if isFootnote { | ||||
| 				refid := data[start+2 : i] | ||||
| 				if bytes.Equal(refid, bytes.Trim(refid, " ")) { | ||||
| 					p.notes = append(p.notes, footnotes{string(refid), "DEFINITION NOT FOUND"}) | ||||
| 					p.r.FootnoteRef(out, refid, len(p.notes)) | ||||
| 					return i + 2 | ||||
| 				} else { | ||||
| 					return 0 | ||||
| 				} | ||||
| 			} else if bytes.Equal(data[i-4:i], []byte(".org")) { | ||||
| 				orgStart := start | ||||
| 				if bytes.Equal(data[orgStart:orgStart+2], []byte("./")) { | ||||
| 					orgStart = orgStart + 1 | ||||
| 				} | ||||
| 				hyperlink = data[orgStart : i-4] | ||||
| 			} else { | ||||
| 				hyperlink = data[start:i] | ||||
| 			} | ||||
| 			closedLink = true | ||||
| 		case charMatches(currChar, '['): | ||||
| 			start = i + 1 | ||||
| 			hasContent = true | ||||
| 		case charMatches(currChar, ']') && closedLink == true && hasContent == true && isImage == true: | ||||
| 			p.r.Image(out, hyperlink, data[start:i], data[start:i]) | ||||
| 			return i + 3 | ||||
| 		case charMatches(currChar, ']') && closedLink == true && hasContent == true: | ||||
| 			var tmpBuf bytes.Buffer | ||||
| 			p.inline(&tmpBuf, data[start:i]) | ||||
| 			p.r.Link(out, hyperlink, tmpBuf.Bytes(), tmpBuf.Bytes()) | ||||
| 			return i + 3 | ||||
| 		case charMatches(currChar, ']') && closedLink == true && hasContent == false && isImage == true: | ||||
| 			p.r.Image(out, hyperlink, hyperlink, hyperlink) | ||||
| 			return i + 2 | ||||
| 		case charMatches(currChar, ']') && closedLink == true && hasContent == false: | ||||
| 			p.r.Link(out, hyperlink, hyperlink, hyperlink) | ||||
| 			return i + 2 | ||||
| 		} | ||||
| 		i++ | ||||
| 	} | ||||
|  | ||||
| 	return 0 | ||||
| } | ||||
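To make the branches above concrete (editorial examples): [[https://example.com][Example]] renders a link whose description is processed inline, [[file:gopher.png]] is treated as an image because of the file: prefix, [[./notes.org]] has the leading ./ and the trailing .org stripped from the link target, and [fn:1] emits a footnote reference and records the id in p.notes so it shows up in the footnote list written at the end of the document.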
|  | ||||
| // Helpers | ||||
| func skipChar(data []byte, start int, char byte) int { | ||||
| 	i := start | ||||
| 	for i < len(data) && charMatches(data[i], char) { | ||||
| 		i++ | ||||
| 	} | ||||
| 	return i | ||||
| } | ||||
|  | ||||
| func isSpace(char byte) bool { | ||||
| 	return charMatches(char, ' ') | ||||
| } | ||||
|  | ||||
| func isEmpty(data []byte) bool { | ||||
| 	if len(data) == 0 { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	for i := 0; i < len(data) && !charMatches(data[i], '\n'); i++ { | ||||
| 		if !charMatches(data[i], ' ') && !charMatches(data[i], '\t') { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func charMatches(a byte, b byte) bool { | ||||
| 	return a == b | ||||
| } | ||||
							
								
								
									
										
BIN  vendor/github.com/chaseadamsio/goorgeous/gopher.gif (generated, vendored): binary file not shown (was 15 KiB)
							
								
								
									
										
BIN  vendor/github.com/chaseadamsio/goorgeous/gopher_small.gif (generated, vendored): binary file not shown (was 3.2 KiB)
							
								
								
									
70  vendor/github.com/chaseadamsio/goorgeous/header.go (generated, vendored)
							| @@ -1,70 +0,0 @@ | ||||
| package goorgeous | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // ExtractOrgHeaders reads all of the headers | ||||
| // from a bufio.Reader and returns them as their own byte slice | ||||
| func ExtractOrgHeaders(r *bufio.Reader) (fm []byte, err error) { | ||||
| 	var out bytes.Buffer | ||||
| 	endOfHeaders := true | ||||
| 	for endOfHeaders { | ||||
| 		p, err := r.Peek(2) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		if !charMatches(p[0], '#') && !charMatches(p[1], '+') { | ||||
| 			endOfHeaders = false | ||||
| 			break | ||||
| 		} | ||||
| 		line, _, err := r.ReadLine() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		out.Write(line) | ||||
| 		out.WriteByte('\n') | ||||
| 	} | ||||
| 	return out.Bytes(), nil | ||||
| } | ||||
|  | ||||
| var reHeader = regexp.MustCompile(`^#\+(\w+?): (.*)`) | ||||
|  | ||||
| // OrgHeaders finds all of the headers in a byte slice and returns | ||||
| // them as a map of string to interface{} | ||||
| func OrgHeaders(input []byte) (map[string]interface{}, error) { | ||||
| 	out := make(map[string]interface{}) | ||||
| 	scanner := bufio.NewScanner(bytes.NewReader(input)) | ||||
|  | ||||
| 	for scanner.Scan() { | ||||
| 		data := scanner.Bytes() | ||||
| 		if !charMatches(data[0], '#') && !charMatches(data[1], '+') { | ||||
| 			return out, nil | ||||
| 		} | ||||
| 		matches := reHeader.FindSubmatch(data) | ||||
|  | ||||
| 		if len(matches) < 3 { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		key := string(matches[1]) | ||||
| 		val := matches[2] | ||||
| 		switch { | ||||
| 		case strings.ToLower(key) == "tags" || strings.ToLower(key) == "categories" || strings.ToLower(key) == "aliases": | ||||
| 			bTags := bytes.Split(val, []byte(" ")) | ||||
| 			tags := make([]string, len(bTags)) | ||||
| 			for idx, tag := range bTags { | ||||
| 				tags[idx] = string(tag) | ||||
| 			} | ||||
| 			out[key] = tags | ||||
| 		default: | ||||
| 			out[key] = string(val) | ||||
| 		} | ||||
|  | ||||
| 	} | ||||
| 	return out, nil | ||||
|  | ||||
| } | ||||
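An editorial illustration of what OrgHeaders produced: for the input

    #+TITLE: My Document
    #+TAGS: org parser

it returned a map with "TITLE" mapped to the string "My Document" and "TAGS" mapped to []string{"org", "parser"}, since the values of tags, categories and aliases keywords are split on spaces.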
| @@ -1,6 +1,6 @@ | ||||
| MIT License | ||||
| 
 | ||||
| Copyright (c) 2017 Chase Adams <realchaseadams@gmail.com> | ||||
| Copyright (c) 2018 Niklas Fasching | ||||
| 
 | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
| of this software and associated documentation files (the "Software"), to deal | ||||
							
								
								
									
84  vendor/github.com/niklasfasching/go-org/org/block.go (generated, vendored, new file)
							| @@ -0,0 +1,84 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| ) | ||||
|  | ||||
| type Block struct { | ||||
| 	Name       string | ||||
| 	Parameters []string | ||||
| 	Children   []Node | ||||
| } | ||||
|  | ||||
| type Example struct { | ||||
| 	Children []Node | ||||
| } | ||||
|  | ||||
| var exampleLineRegexp = regexp.MustCompile(`^(\s*):(\s(.*)|\s*$)`) | ||||
| var beginBlockRegexp = regexp.MustCompile(`(?i)^(\s*)#\+BEGIN_(\w+)(.*)`) | ||||
| var endBlockRegexp = regexp.MustCompile(`(?i)^(\s*)#\+END_(\w+)`) | ||||
|  | ||||
| func lexBlock(line string) (token, bool) { | ||||
| 	if m := beginBlockRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"beginBlock", len(m[1]), strings.ToUpper(m[2]), m}, true | ||||
| 	} else if m := endBlockRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"endBlock", len(m[1]), strings.ToUpper(m[2]), m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func lexExample(line string) (token, bool) { | ||||
| 	if m := exampleLineRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"example", len(m[1]), m[3], m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func isRawTextBlock(name string) bool { return name == "SRC" || name == "EXAMPLE" || name == "EXPORT" } | ||||
|  | ||||
| func (d *Document) parseBlock(i int, parentStop stopFn) (int, Node) { | ||||
| 	t, start := d.tokens[i], i | ||||
| 	name, parameters := t.content, strings.Fields(t.matches[3]) | ||||
| 	trim := trimIndentUpTo(d.tokens[i].lvl) | ||||
| 	stop := func(d *Document, i int) bool { | ||||
| 		return i >= len(d.tokens) || (d.tokens[i].kind == "endBlock" && d.tokens[i].content == name) | ||||
| 	} | ||||
| 	block, i := Block{name, parameters, nil}, i+1 | ||||
| 	if isRawTextBlock(name) { | ||||
| 		rawText := "" | ||||
| 		for ; !stop(d, i); i++ { | ||||
| 			rawText += trim(d.tokens[i].matches[0]) + "\n" | ||||
| 		} | ||||
| 		block.Children = d.parseRawInline(rawText) | ||||
| 	} else { | ||||
| 		consumed, nodes := d.parseMany(i, stop) | ||||
| 		block.Children = nodes | ||||
| 		i += consumed | ||||
| 	} | ||||
| 	if i < len(d.tokens) && d.tokens[i].kind == "endBlock" && d.tokens[i].content == name { | ||||
| 		return i + 1 - start, block | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| func (d *Document) parseExample(i int, parentStop stopFn) (int, Node) { | ||||
| 	example, start := Example{}, i | ||||
| 	for ; !parentStop(d, i) && d.tokens[i].kind == "example"; i++ { | ||||
| 		example.Children = append(example.Children, Text{d.tokens[i].content, true}) | ||||
| 	} | ||||
| 	return i - start, example | ||||
| } | ||||
|  | ||||
| func trimIndentUpTo(max int) func(string) string { | ||||
| 	return func(line string) string { | ||||
| 		i := 0 | ||||
| 		for ; i < len(line) && i < max && unicode.IsSpace(rune(line[i])); i++ { | ||||
| 		} | ||||
| 		return line[i:] | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (n Example) String() string { return orgWriter.nodesAsString(n) } | ||||
| func (n Block) String() string   { return orgWriter.nodesAsString(n) } | ||||
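A short editorial example of the block handling above: the input

    #+BEGIN_SRC go
    fmt.Println("hi")
    #+END_SRC

is lexed into a beginBlock token named SRC; because SRC, EXAMPLE and EXPORT are raw-text blocks, the body is kept as raw text with indentation trimmed up to the level of the #+BEGIN_ line, and the matching #+END_SRC closes the Block node. A block that is never closed makes parseBlock return (0, nil), so its opening line later falls back to being parsed as plain text.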
							
								
								
									
260  vendor/github.com/niklasfasching/go-org/org/document.go (generated, vendored, new file)
							| @@ -0,0 +1,260 @@ | ||||
| // Package org is an Org mode syntax processor. | ||||
| // | ||||
| // It parses plain text into an AST and can export it as HTML or pretty printed Org mode syntax. | ||||
| // Further export formats can be defined using the Writer interface. | ||||
| // | ||||
| // You probably want to start with something like this: | ||||
| //   input := strings.NewReader("Your Org mode input") | ||||
| //   html, err := org.New().Parse(input, "./").Write(org.NewHTMLWriter()) | ||||
| //   if err != nil { | ||||
| //       log.Fatalf("Something went wrong: %s", err) | ||||
| //   } | ||||
| //   log.Print(html) | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| type Configuration struct { | ||||
| 	MaxEmphasisNewLines int                                   // Maximum number of newlines inside an emphasis. See org-emphasis-regexp-components newline. | ||||
| 	AutoLink            bool                                  // Try to convert text passages that look like hyperlinks into hyperlinks. | ||||
| 	DefaultSettings     map[string]string                     // Default values for settings that are overridden by setting the same key in BufferSettings. | ||||
| 	Log                 *log.Logger                           // Log is used to print warnings during parsing. | ||||
| 	ReadFile            func(filename string) ([]byte, error) // ReadFile is used to read e.g. #+INCLUDE files. | ||||
| } | ||||
|  | ||||
| // Document contains the parsing results and a pointer to the Configuration. | ||||
| type Document struct { | ||||
| 	*Configuration | ||||
| 	Path           string // Path of the file containing the parse input - used to resolve relative paths during parsing (e.g. INCLUDE). | ||||
| 	tokens         []token | ||||
| 	Nodes          []Node | ||||
| 	NamedNodes     map[string]Node | ||||
| 	Outline        Outline           // Outline is a Table Of Contents for the document and contains all sections (headline + content). | ||||
| 	BufferSettings map[string]string // BufferSettings contains all settings that were parsed from keywords. | ||||
| 	Error          error | ||||
| } | ||||
|  | ||||
| // Node represents a parsed node of the document. | ||||
| type Node interface { | ||||
| 	String() string // String returns the pretty printed Org mode string for the node (see OrgWriter). | ||||
| } | ||||
|  | ||||
| type lexFn = func(line string) (t token, ok bool) | ||||
| type parseFn = func(*Document, int, stopFn) (int, Node) | ||||
| type stopFn = func(*Document, int) bool | ||||
|  | ||||
| type token struct { | ||||
| 	kind    string | ||||
| 	lvl     int | ||||
| 	content string | ||||
| 	matches []string | ||||
| } | ||||
|  | ||||
| var lexFns = []lexFn{ | ||||
| 	lexHeadline, | ||||
| 	lexDrawer, | ||||
| 	lexBlock, | ||||
| 	lexList, | ||||
| 	lexTable, | ||||
| 	lexHorizontalRule, | ||||
| 	lexKeywordOrComment, | ||||
| 	lexFootnoteDefinition, | ||||
| 	lexExample, | ||||
| 	lexText, | ||||
| } | ||||
|  | ||||
| var nilToken = token{"nil", -1, "", nil} | ||||
| var orgWriter = NewOrgWriter() | ||||
|  | ||||
| // New returns a new Configuration with (hopefully) sane defaults. | ||||
| func New() *Configuration { | ||||
| 	return &Configuration{ | ||||
| 		AutoLink:            true, | ||||
| 		MaxEmphasisNewLines: 1, | ||||
| 		DefaultSettings: map[string]string{ | ||||
| 			"TODO":         "TODO | DONE", | ||||
| 			"EXCLUDE_TAGS": "noexport", | ||||
| 			"OPTIONS":      "toc:t <:t e:t f:t pri:t todo:t tags:t", | ||||
| 		}, | ||||
| 		Log:      log.New(os.Stderr, "go-org: ", 0), | ||||
| 		ReadFile: ioutil.ReadFile, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // String returns the pretty printed Org mode string for the given nodes (see OrgWriter). | ||||
| func String(nodes []Node) string { return orgWriter.nodesAsString(nodes...) } | ||||
|  | ||||
| // Write is called with an instance of the Writer interface to export a parsed Document into another format. | ||||
| func (d *Document) Write(w Writer) (out string, err error) { | ||||
| 	defer func() { | ||||
| 		if recovered := recover(); recovered != nil { | ||||
| 			err = fmt.Errorf("could not write output: %s", recovered) | ||||
| 		} | ||||
| 	}() | ||||
| 	if d.Error != nil { | ||||
| 		return "", d.Error | ||||
| 	} else if d.Nodes == nil { | ||||
| 		return "", fmt.Errorf("could not write output: parse was not called") | ||||
| 	} | ||||
| 	w.Before(d) | ||||
| 	WriteNodes(w, d.Nodes...) | ||||
| 	w.After(d) | ||||
| 	return w.String(), err | ||||
| } | ||||
|  | ||||
| // Parse parses the input into an AST (and some other helpful fields like Outline). | ||||
| // To allow method chaining, errors are stored in document.Error rather than being returned. | ||||
| func (c *Configuration) Parse(input io.Reader, path string) (d *Document) { | ||||
| 	outlineSection := &Section{} | ||||
| 	d = &Document{ | ||||
| 		Configuration:  c, | ||||
| 		Outline:        Outline{outlineSection, outlineSection, 0}, | ||||
| 		BufferSettings: map[string]string{}, | ||||
| 		NamedNodes:     map[string]Node{}, | ||||
| 		Path:           path, | ||||
| 	} | ||||
| 	defer func() { | ||||
| 		if recovered := recover(); recovered != nil { | ||||
| 			d.Error = fmt.Errorf("could not parse input: %v", recovered) | ||||
| 		} | ||||
| 	}() | ||||
| 	if d.tokens != nil { | ||||
| 		d.Error = fmt.Errorf("parse was called multiple times") | ||||
| 	} | ||||
| 	d.tokenize(input) | ||||
| 	_, nodes := d.parseMany(0, func(d *Document, i int) bool { return i >= len(d.tokens) }) | ||||
| 	d.Nodes = nodes | ||||
| 	return d | ||||
| } | ||||
|  | ||||
| // Silent disables all logging of warnings during parsing. | ||||
| func (c *Configuration) Silent() *Configuration { | ||||
| 	c.Log = log.New(ioutil.Discard, "", 0) | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| func (d *Document) tokenize(input io.Reader) { | ||||
| 	d.tokens = []token{} | ||||
| 	scanner := bufio.NewScanner(input) | ||||
| 	for scanner.Scan() { | ||||
| 		d.tokens = append(d.tokens, tokenize(scanner.Text())) | ||||
| 	} | ||||
| 	if err := scanner.Err(); err != nil { | ||||
| 		d.Error = fmt.Errorf("could not tokenize input: %s", err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Get returns the value for key in BufferSettings or DefaultSettings if key does not exist in the former | ||||
| func (d *Document) Get(key string) string { | ||||
| 	if v, ok := d.BufferSettings[key]; ok { | ||||
| 		return v | ||||
| 	} | ||||
| 	if v, ok := d.DefaultSettings[key]; ok { | ||||
| 		return v | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| // GetOption returns the value associated with the export option key | ||||
| // Currently supported options: | ||||
| // - < (export timestamps) | ||||
| // - e (export org entities) | ||||
| // - f (export footnotes) | ||||
| // - toc (export table of contents) | ||||
| // - todo (export headline todo status) | ||||
| // - pri (export headline priority) | ||||
| // - tags (export headline tags) | ||||
| // see https://orgmode.org/manual/Export-settings.html for more information | ||||
| func (d *Document) GetOption(key string) bool { | ||||
| 	get := func(settings map[string]string) string { | ||||
| 		for _, field := range strings.Fields(settings["OPTIONS"]) { | ||||
| 			if strings.HasPrefix(field, key+":") { | ||||
| 				return field[len(key)+1:] | ||||
| 			} | ||||
| 		} | ||||
| 		return "" | ||||
| 	} | ||||
| 	value := get(d.BufferSettings) | ||||
| 	if value == "" { | ||||
| 		value = get(d.DefaultSettings) | ||||
| 	} | ||||
| 	switch value { | ||||
| 	case "t": | ||||
| 		return true | ||||
| 	case "nil": | ||||
| 		return false | ||||
| 	default: | ||||
| 		d.Log.Printf("Bad value for export option %s (%s)", key, value) | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (d *Document) parseOne(i int, stop stopFn) (consumed int, node Node) { | ||||
| 	switch d.tokens[i].kind { | ||||
| 	case "unorderedList", "orderedList": | ||||
| 		consumed, node = d.parseList(i, stop) | ||||
| 	case "tableRow", "tableSeparator": | ||||
| 		consumed, node = d.parseTable(i, stop) | ||||
| 	case "beginBlock": | ||||
| 		consumed, node = d.parseBlock(i, stop) | ||||
| 	case "beginDrawer": | ||||
| 		consumed, node = d.parseDrawer(i, stop) | ||||
| 	case "text": | ||||
| 		consumed, node = d.parseParagraph(i, stop) | ||||
| 	case "example": | ||||
| 		consumed, node = d.parseExample(i, stop) | ||||
| 	case "horizontalRule": | ||||
| 		consumed, node = d.parseHorizontalRule(i, stop) | ||||
| 	case "comment": | ||||
| 		consumed, node = d.parseComment(i, stop) | ||||
| 	case "keyword": | ||||
| 		consumed, node = d.parseKeyword(i, stop) | ||||
| 	case "headline": | ||||
| 		consumed, node = d.parseHeadline(i, stop) | ||||
| 	case "footnoteDefinition": | ||||
| 		consumed, node = d.parseFootnoteDefinition(i, stop) | ||||
| 	} | ||||
|  | ||||
| 	if consumed != 0 { | ||||
| 		return consumed, node | ||||
| 	} | ||||
| 	d.Log.Printf("Could not parse token %#v: Falling back to treating it as plain text.", d.tokens[i]) | ||||
| 	m := plainTextRegexp.FindStringSubmatch(d.tokens[i].matches[0]) | ||||
| 	d.tokens[i] = token{"text", len(m[1]), m[2], m} | ||||
| 	return d.parseOne(i, stop) | ||||
| } | ||||
|  | ||||
| func (d *Document) parseMany(i int, stop stopFn) (int, []Node) { | ||||
| 	start, nodes := i, []Node{} | ||||
| 	for i < len(d.tokens) && !stop(d, i) { | ||||
| 		consumed, node := d.parseOne(i, stop) | ||||
| 		i += consumed | ||||
| 		nodes = append(nodes, node) | ||||
| 	} | ||||
| 	return i - start, nodes | ||||
| } | ||||
|  | ||||
| func (d *Document) addHeadline(headline *Headline) int { | ||||
| 	current := &Section{Headline: headline} | ||||
| 	d.Outline.last.add(current) | ||||
| 	d.Outline.count++ | ||||
| 	d.Outline.last = current | ||||
| 	return d.Outline.count | ||||
| } | ||||
|  | ||||
| func tokenize(line string) token { | ||||
| 	for _, lexFn := range lexFns { | ||||
| 		if token, ok := lexFn(line); ok { | ||||
| 			return token | ||||
| 		} | ||||
| 	} | ||||
| 	panic(fmt.Sprintf("could not lex line: %s", line)) | ||||
| } | ||||
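Since document.go holds the exported entry points, here is a minimal usage sketch (an editorial example that relies only on the API visible in this diff; the input string is made up):

    package main

    import (
        "log"
        "strings"

        "github.com/niklasfasching/go-org/org"
    )

    func main() {
        input := "* TODO Write the docs :dev:\nSome /emphasized/ text."
        // Parse stores errors in Document.Error to allow chaining; Write returns them.
        doc := org.New().Silent().Parse(strings.NewReader(input), "./")
        html, err := doc.Write(org.NewHTMLWriter())
        if err != nil {
            log.Fatalf("could not render: %s", err)
        }
        log.Print(html)
        log.Print(doc.Get("TODO"))      // "TODO | DONE" from DefaultSettings unless overridden
        log.Print(doc.GetOption("toc")) // true, taken from the default OPTIONS string
    }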
							
								
								
									
97  vendor/github.com/niklasfasching/go-org/org/drawer.go (generated, vendored, new file)
							| @@ -0,0 +1,97 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| type Drawer struct { | ||||
| 	Name     string | ||||
| 	Children []Node | ||||
| } | ||||
|  | ||||
| type PropertyDrawer struct { | ||||
| 	Properties [][]string | ||||
| } | ||||
|  | ||||
| var beginDrawerRegexp = regexp.MustCompile(`^(\s*):(\S+):\s*$`) | ||||
| var endDrawerRegexp = regexp.MustCompile(`^(\s*):END:\s*$`) | ||||
| var propertyRegexp = regexp.MustCompile(`^(\s*):(\S+):(\s+(.*)$|$)`) | ||||
|  | ||||
| func lexDrawer(line string) (token, bool) { | ||||
| 	if m := endDrawerRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"endDrawer", len(m[1]), "", m}, true | ||||
| 	} else if m := beginDrawerRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"beginDrawer", len(m[1]), strings.ToUpper(m[2]), m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func (d *Document) parseDrawer(i int, parentStop stopFn) (int, Node) { | ||||
| 	name := strings.ToUpper(d.tokens[i].content) | ||||
| 	if name == "PROPERTIES" { | ||||
| 		return d.parsePropertyDrawer(i, parentStop) | ||||
| 	} | ||||
| 	drawer, start := Drawer{Name: name}, i | ||||
| 	i++ | ||||
| 	stop := func(d *Document, i int) bool { | ||||
| 		if parentStop(d, i) { | ||||
| 			return true | ||||
| 		} | ||||
| 		kind := d.tokens[i].kind | ||||
| 		return kind == "beginDrawer" || kind == "endDrawer" || kind == "headline" | ||||
| 	} | ||||
| 	for { | ||||
| 		consumed, nodes := d.parseMany(i, stop) | ||||
| 		i += consumed | ||||
| 		drawer.Children = append(drawer.Children, nodes...) | ||||
| 		if i < len(d.tokens) && d.tokens[i].kind == "beginDrawer" { | ||||
| 			p := Paragraph{[]Node{Text{":" + d.tokens[i].content + ":", false}}} | ||||
| 			drawer.Children = append(drawer.Children, p) | ||||
| 			i++ | ||||
| 		} else { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if i < len(d.tokens) && d.tokens[i].kind == "endDrawer" { | ||||
| 		i++ | ||||
| 	} | ||||
| 	return i - start, drawer | ||||
| } | ||||
|  | ||||
| func (d *Document) parsePropertyDrawer(i int, parentStop stopFn) (int, Node) { | ||||
| 	drawer, start := PropertyDrawer{}, i | ||||
| 	i++ | ||||
| 	stop := func(d *Document, i int) bool { | ||||
| 		return parentStop(d, i) || (d.tokens[i].kind != "text" && d.tokens[i].kind != "beginDrawer") | ||||
| 	} | ||||
| 	for ; !stop(d, i); i++ { | ||||
| 		m := propertyRegexp.FindStringSubmatch(d.tokens[i].matches[0]) | ||||
| 		if m == nil { | ||||
| 			return 0, nil | ||||
| 		} | ||||
| 		k, v := strings.ToUpper(m[2]), strings.TrimSpace(m[4]) | ||||
| 		drawer.Properties = append(drawer.Properties, []string{k, v}) | ||||
| 	} | ||||
| 	if i < len(d.tokens) && d.tokens[i].kind == "endDrawer" { | ||||
| 		i++ | ||||
| 	} else { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	return i - start, drawer | ||||
| } | ||||
|  | ||||
| func (d *PropertyDrawer) Get(key string) (string, bool) { | ||||
| 	if d == nil { | ||||
| 		return "", false | ||||
| 	} | ||||
| 	for _, kvPair := range d.Properties { | ||||
| 		if kvPair[0] == key { | ||||
| 			return kvPair[1], true | ||||
| 		} | ||||
| 	} | ||||
| 	return "", false | ||||
| } | ||||
|  | ||||
| func (n Drawer) String() string         { return orgWriter.nodesAsString(n) } | ||||
| func (n PropertyDrawer) String() string { return orgWriter.nodesAsString(n) } | ||||
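For reference, an editorial example of the drawer handling above: the lines

    :PROPERTIES:
    :CUSTOM_ID: intro
    :END:

are parsed into a PropertyDrawer whose Properties slice holds the pair ["CUSTOM_ID", "intro"] (keys are upper-cased and values trimmed), so Get("CUSTOM_ID") returns ("intro", true); if the closing :END: is missing, parsePropertyDrawer gives up and returns (0, nil).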
							
								
								
									
35  vendor/github.com/niklasfasching/go-org/org/footnote.go (generated, vendored, new file)
							| @@ -0,0 +1,35 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"regexp" | ||||
| ) | ||||
|  | ||||
| type FootnoteDefinition struct { | ||||
| 	Name     string | ||||
| 	Children []Node | ||||
| 	Inline   bool | ||||
| } | ||||
|  | ||||
| var footnoteDefinitionRegexp = regexp.MustCompile(`^\[fn:([\w-]+)\](\s+(.+)|\s*$)`) | ||||
|  | ||||
| func lexFootnoteDefinition(line string) (token, bool) { | ||||
| 	if m := footnoteDefinitionRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"footnoteDefinition", 0, m[1], m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func (d *Document) parseFootnoteDefinition(i int, parentStop stopFn) (int, Node) { | ||||
| 	start, name := i, d.tokens[i].content | ||||
| 	d.tokens[i] = tokenize(d.tokens[i].matches[2]) | ||||
| 	stop := func(d *Document, i int) bool { | ||||
| 		return parentStop(d, i) || | ||||
| 			(isSecondBlankLine(d, i) && i > start+1) || | ||||
| 			d.tokens[i].kind == "headline" || d.tokens[i].kind == "footnoteDefinition" | ||||
| 	} | ||||
| 	consumed, nodes := d.parseMany(i, stop) | ||||
| 	definition := FootnoteDefinition{name, nodes, false} | ||||
| 	return consumed, definition | ||||
| } | ||||
|  | ||||
| func (n FootnoteDefinition) String() string { return orgWriter.nodesAsString(n) } | ||||
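An editorial example of the definitions handled here: a line such as

    [fn:1] Further reading on the topic.

is lexed into a footnoteDefinition token named "1"; its body is parsed with parseMany and ends at the next headline, the next footnote definition, or (judging by the isSecondBlankLine helper, which is defined elsewhere) after a second blank line.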
							
								
								
									
27  vendor/github.com/niklasfasching/go-org/org/fuzz.go (generated, vendored, new file)
							| @@ -0,0 +1,27 @@ | ||||
| // +build gofuzz | ||||
|  | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // Fuzz function to be used by https://github.com/dvyukov/go-fuzz | ||||
| func Fuzz(input []byte) int { | ||||
| 	conf := New().Silent() | ||||
| 	d := conf.Parse(bytes.NewReader(input), "") | ||||
| 	orgOutput, err := d.Write(NewOrgWriter()) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	htmlOutputA, err := d.Write(NewHTMLWriter()) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	htmlOutputB, err := conf.Parse(strings.NewReader(orgOutput), "").Write(NewHTMLWriter()) | ||||
| 	if htmlOutputA != htmlOutputB { | ||||
| 		panic("rendered org results in different html than original input") | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
							
								
								
									
101  vendor/github.com/niklasfasching/go-org/org/headline.go (generated, vendored, new file)
							| @@ -0,0 +1,101 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| ) | ||||
|  | ||||
| type Outline struct { | ||||
| 	*Section | ||||
| 	last  *Section | ||||
| 	count int | ||||
| } | ||||
|  | ||||
| type Section struct { | ||||
| 	Headline *Headline | ||||
| 	Parent   *Section | ||||
| 	Children []*Section | ||||
| } | ||||
|  | ||||
| type Headline struct { | ||||
| 	Index      int | ||||
| 	Lvl        int | ||||
| 	Status     string | ||||
| 	Priority   string | ||||
| 	Properties *PropertyDrawer | ||||
| 	Title      []Node | ||||
| 	Tags       []string | ||||
| 	Children   []Node | ||||
| } | ||||
|  | ||||
| var headlineRegexp = regexp.MustCompile(`^([*]+)\s+(.*)`) | ||||
| var tagRegexp = regexp.MustCompile(`(.*?)\s+(:[A-Za-z0-9_@#%:]+:\s*$)`) | ||||
|  | ||||
| func lexHeadline(line string) (token, bool) { | ||||
| 	if m := headlineRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"headline", len(m[1]), m[2], m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func (d *Document) parseHeadline(i int, parentStop stopFn) (int, Node) { | ||||
| 	t, headline := d.tokens[i], Headline{} | ||||
| 	headline.Lvl = t.lvl | ||||
|  | ||||
| 	headline.Index = d.addHeadline(&headline) | ||||
|  | ||||
| 	text := t.content | ||||
| 	todoKeywords := strings.FieldsFunc(d.Get("TODO"), func(r rune) bool { return unicode.IsSpace(r) || r == '|' }) | ||||
| 	for _, k := range todoKeywords { | ||||
| 		if strings.HasPrefix(text, k) && len(text) > len(k) && unicode.IsSpace(rune(text[len(k)])) { | ||||
| 			headline.Status = k | ||||
| 			text = text[len(k)+1:] | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if len(text) >= 4 && text[0:2] == "[#" && strings.Contains("ABC", text[2:3]) && text[3] == ']' { | ||||
| 		headline.Priority = text[2:3] | ||||
| 		text = strings.TrimSpace(text[4:]) | ||||
| 	} | ||||
|  | ||||
| 	if m := tagRegexp.FindStringSubmatch(text); m != nil { | ||||
| 		text = m[1] | ||||
| 		headline.Tags = strings.FieldsFunc(m[2], func(r rune) bool { return r == ':' }) | ||||
| 	} | ||||
|  | ||||
| 	headline.Title = d.parseInline(text) | ||||
|  | ||||
| 	stop := func(d *Document, i int) bool { | ||||
| 		return parentStop(d, i) || d.tokens[i].kind == "headline" && d.tokens[i].lvl <= headline.Lvl | ||||
| 	} | ||||
| 	consumed, nodes := d.parseMany(i+1, stop) | ||||
| 	if len(nodes) > 0 { | ||||
| 		if d, ok := nodes[0].(PropertyDrawer); ok { | ||||
| 			headline.Properties = &d | ||||
| 			nodes = nodes[1:] | ||||
| 		} | ||||
| 	} | ||||
| 	headline.Children = nodes | ||||
| 	return consumed + 1, headline | ||||
| } | ||||
|  | ||||
| func (h Headline) ID() string { | ||||
| 	if customID, ok := h.Properties.Get("CUSTOM_ID"); ok { | ||||
| 		return customID | ||||
| 	} | ||||
| 	return fmt.Sprintf("headline-%d", h.Index) | ||||
| } | ||||
|  | ||||
| func (parent *Section) add(current *Section) { | ||||
| 	if parent.Headline == nil || parent.Headline.Lvl < current.Headline.Lvl { | ||||
| 		parent.Children = append(parent.Children, current) | ||||
| 		current.Parent = parent | ||||
| 	} else { | ||||
| 		parent.Parent.add(current) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (n Headline) String() string { return orgWriter.nodesAsString(n) } | ||||
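A small editorial sketch of how the fields above end up populated, using only the exported API shown in this diff (the input is made up):

    package main

    import (
        "fmt"
        "strings"

        "github.com/niklasfasching/go-org/org"
    )

    func main() {
        input := "** TODO [#A] Implement the exporter :dev:export:\n"
        doc := org.New().Silent().Parse(strings.NewReader(input), "./")
        if h, ok := doc.Nodes[0].(org.Headline); ok {
            fmt.Println(h.Lvl)      // 2
            fmt.Println(h.Status)   // "TODO", matched against the TODO keyword setting
            fmt.Println(h.Priority) // "A"
            fmt.Println(h.Tags)     // [dev export]
            fmt.Println(h.ID())     // "headline-1", since no CUSTOM_ID property is set
        }
    }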
							
								
								
									
437  vendor/github.com/niklasfasching/go-org/org/html_entity.go (generated, vendored, new file)
							| @@ -0,0 +1,437 @@ | ||||
| package org | ||||
|  | ||||
| import "strings" | ||||
|  | ||||
| var htmlEntityReplacer *strings.Replacer | ||||
|  | ||||
| func init() { | ||||
| 	htmlEntities = append(htmlEntities, | ||||
| 		"---", "—", | ||||
| 		"--", "–", | ||||
| 		"...", "…", | ||||
| 	) | ||||
| 	htmlEntityReplacer = strings.NewReplacer(htmlEntities...) | ||||
| } | ||||
|  | ||||
| /* | ||||
| Generated & copied over using the following elisp | ||||
| (Setting up go generate seems like a waste for now - I call YAGNI on that one) | ||||
|  | ||||
| (insert (mapconcat | ||||
|          (lambda (entity) (concat "`\\" (car entity) "`, `" (nth 6 entity) "`")) ; entity -> utf8 | ||||
|          (remove-if-not 'listp org-entities) | ||||
|          ",\n")) | ||||
| */ | ||||
| var htmlEntities = []string{ | ||||
| 	`\Agrave`, `À`, | ||||
| 	`\agrave`, `à`, | ||||
| 	`\Aacute`, `Á`, | ||||
| 	`\aacute`, `á`, | ||||
| 	`\Acirc`, `Â`, | ||||
| 	`\acirc`, `â`, | ||||
| 	`\Amacr`, `Ã`, | ||||
| 	`\amacr`, `ã`, | ||||
| 	`\Atilde`, `Ã`, | ||||
| 	`\atilde`, `ã`, | ||||
| 	`\Auml`, `Ä`, | ||||
| 	`\auml`, `ä`, | ||||
| 	`\Aring`, `Å`, | ||||
| 	`\AA`, `Å`, | ||||
| 	`\aring`, `å`, | ||||
| 	`\AElig`, `Æ`, | ||||
| 	`\aelig`, `æ`, | ||||
| 	`\Ccedil`, `Ç`, | ||||
| 	`\ccedil`, `ç`, | ||||
| 	`\Egrave`, `È`, | ||||
| 	`\egrave`, `è`, | ||||
| 	`\Eacute`, `É`, | ||||
| 	`\eacute`, `é`, | ||||
| 	`\Ecirc`, `Ê`, | ||||
| 	`\ecirc`, `ê`, | ||||
| 	`\Euml`, `Ë`, | ||||
| 	`\euml`, `ë`, | ||||
| 	`\Igrave`, `Ì`, | ||||
| 	`\igrave`, `ì`, | ||||
| 	`\Iacute`, `Í`, | ||||
| 	`\iacute`, `í`, | ||||
| 	`\Icirc`, `Î`, | ||||
| 	`\icirc`, `î`, | ||||
| 	`\Iuml`, `Ï`, | ||||
| 	`\iuml`, `ï`, | ||||
| 	`\Ntilde`, `Ñ`, | ||||
| 	`\ntilde`, `ñ`, | ||||
| 	`\Ograve`, `Ò`, | ||||
| 	`\ograve`, `ò`, | ||||
| 	`\Oacute`, `Ó`, | ||||
| 	`\oacute`, `ó`, | ||||
| 	`\Ocirc`, `Ô`, | ||||
| 	`\ocirc`, `ô`, | ||||
| 	`\Otilde`, `Õ`, | ||||
| 	`\otilde`, `õ`, | ||||
| 	`\Ouml`, `Ö`, | ||||
| 	`\ouml`, `ö`, | ||||
| 	`\Oslash`, `Ø`, | ||||
| 	`\oslash`, `ø`, | ||||
| 	`\OElig`, `Œ`, | ||||
| 	`\oelig`, `œ`, | ||||
| 	`\Scaron`, `Š`, | ||||
| 	`\scaron`, `š`, | ||||
| 	`\szlig`, `ß`, | ||||
| 	`\Ugrave`, `Ù`, | ||||
| 	`\ugrave`, `ù`, | ||||
| 	`\Uacute`, `Ú`, | ||||
| 	`\uacute`, `ú`, | ||||
| 	`\Ucirc`, `Û`, | ||||
| 	`\ucirc`, `û`, | ||||
| 	`\Uuml`, `Ü`, | ||||
| 	`\uuml`, `ü`, | ||||
| 	`\Yacute`, `Ý`, | ||||
| 	`\yacute`, `ý`, | ||||
| 	`\Yuml`, `Ÿ`, | ||||
| 	`\yuml`, `ÿ`, | ||||
| 	`\fnof`, `ƒ`, | ||||
| 	`\real`, `ℜ`, | ||||
| 	`\image`, `ℑ`, | ||||
| 	`\weierp`, `℘`, | ||||
| 	`\ell`, `ℓ`, | ||||
| 	`\imath`, `ı`, | ||||
| 	`\jmath`, `ȷ`, | ||||
| 	`\Alpha`, `Α`, | ||||
| 	`\alpha`, `α`, | ||||
| 	`\Beta`, `Β`, | ||||
| 	`\beta`, `β`, | ||||
| 	`\Gamma`, `Γ`, | ||||
| 	`\gamma`, `γ`, | ||||
| 	`\Delta`, `Δ`, | ||||
| 	`\delta`, `δ`, | ||||
| 	`\Epsilon`, `Ε`, | ||||
| 	`\epsilon`, `ε`, | ||||
| 	`\varepsilon`, `ε`, | ||||
| 	`\Zeta`, `Ζ`, | ||||
| 	`\zeta`, `ζ`, | ||||
| 	`\Eta`, `Η`, | ||||
| 	`\eta`, `η`, | ||||
| 	`\Theta`, `Θ`, | ||||
| 	`\theta`, `θ`, | ||||
| 	`\thetasym`, `ϑ`, | ||||
| 	`\vartheta`, `ϑ`, | ||||
| 	`\Iota`, `Ι`, | ||||
| 	`\iota`, `ι`, | ||||
| 	`\Kappa`, `Κ`, | ||||
| 	`\kappa`, `κ`, | ||||
| 	`\Lambda`, `Λ`, | ||||
| 	`\lambda`, `λ`, | ||||
| 	`\Mu`, `Μ`, | ||||
| 	`\mu`, `μ`, | ||||
| 	`\nu`, `ν`, | ||||
| 	`\Nu`, `Ν`, | ||||
| 	`\Xi`, `Ξ`, | ||||
| 	`\xi`, `ξ`, | ||||
| 	`\Omicron`, `Ο`, | ||||
| 	`\omicron`, `ο`, | ||||
| 	`\Pi`, `Π`, | ||||
| 	`\pi`, `π`, | ||||
| 	`\Rho`, `Ρ`, | ||||
| 	`\rho`, `ρ`, | ||||
| 	`\Sigma`, `Σ`, | ||||
| 	`\sigma`, `σ`, | ||||
| 	`\sigmaf`, `ς`, | ||||
| 	`\varsigma`, `ς`, | ||||
| 	`\Tau`, `Τ`, | ||||
| 	`\Upsilon`, `Υ`, | ||||
| 	`\upsih`, `ϒ`, | ||||
| 	`\upsilon`, `υ`, | ||||
| 	`\Phi`, `Φ`, | ||||
| 	`\phi`, `ɸ`, | ||||
| 	`\varphi`, `φ`, | ||||
| 	`\Chi`, `Χ`, | ||||
| 	`\chi`, `χ`, | ||||
| 	`\acutex`, `𝑥́`, | ||||
| 	`\Psi`, `Ψ`, | ||||
| 	`\psi`, `ψ`, | ||||
| 	`\tau`, `τ`, | ||||
| 	`\Omega`, `Ω`, | ||||
| 	`\omega`, `ω`, | ||||
| 	`\piv`, `ϖ`, | ||||
| 	`\varpi`, `ϖ`, | ||||
| 	`\partial`, `∂`, | ||||
| 	`\alefsym`, `ℵ`, | ||||
| 	`\aleph`, `ℵ`, | ||||
| 	`\gimel`, `ℷ`, | ||||
| 	`\beth`, `ב`, | ||||
| 	`\dalet`, `ד`, | ||||
| 	`\ETH`, `Ð`, | ||||
| 	`\eth`, `ð`, | ||||
| 	`\THORN`, `Þ`, | ||||
| 	`\thorn`, `þ`, | ||||
| 	`\dots`, `…`, | ||||
| 	`\cdots`, `⋯`, | ||||
| 	`\hellip`, `…`, | ||||
| 	`\middot`, `·`, | ||||
| 	`\iexcl`, `¡`, | ||||
| 	`\iquest`, `¿`, | ||||
| 	`\shy`, ``, | ||||
| 	`\ndash`, `–`, | ||||
| 	`\mdash`, `—`, | ||||
| 	`\quot`, `"`, | ||||
| 	`\acute`, `´`, | ||||
| 	`\ldquo`, `“`, | ||||
| 	`\rdquo`, `”`, | ||||
| 	`\bdquo`, `„`, | ||||
| 	`\lsquo`, `‘`, | ||||
| 	`\rsquo`, `’`, | ||||
| 	`\sbquo`, `‚`, | ||||
| 	`\laquo`, `«`, | ||||
| 	`\raquo`, `»`, | ||||
| 	`\lsaquo`, `‹`, | ||||
| 	`\rsaquo`, `›`, | ||||
| 	`\circ`, `∘`, | ||||
| 	`\vert`, `|`, | ||||
| 	`\vbar`, `|`, | ||||
| 	`\brvbar`, `¦`, | ||||
| 	`\S`, `§`, | ||||
| 	`\sect`, `§`, | ||||
| 	`\amp`, `&`, | ||||
| 	`\lt`, `<`, | ||||
| 	`\gt`, `>`, | ||||
| 	`\tilde`, `~`, | ||||
| 	`\slash`, `/`, | ||||
| 	`\plus`, `+`, | ||||
| 	`\under`, `_`, | ||||
| 	`\equal`, `=`, | ||||
| 	`\asciicirc`, `^`, | ||||
| 	`\dagger`, `†`, | ||||
| 	`\dag`, `†`, | ||||
| 	`\Dagger`, `‡`, | ||||
| 	`\ddag`, `‡`, | ||||
| 	`\nbsp`, ` `, | ||||
| 	`\ensp`, ` `, | ||||
| 	`\emsp`, ` `, | ||||
| 	`\thinsp`, ` `, | ||||
| 	`\curren`, `¤`, | ||||
| 	`\cent`, `¢`, | ||||
| 	`\pound`, `£`, | ||||
| 	`\yen`, `¥`, | ||||
| 	`\euro`, `€`, | ||||
| 	`\EUR`, `€`, | ||||
| 	`\dollar`, `$`, | ||||
| 	`\USD`, `$`, | ||||
| 	`\copy`, `©`, | ||||
| 	`\reg`, `®`, | ||||
| 	`\trade`, `™`, | ||||
| 	`\minus`, `−`, | ||||
| 	`\pm`, `±`, | ||||
| 	`\plusmn`, `±`, | ||||
| 	`\times`, `×`, | ||||
| 	`\frasl`, `⁄`, | ||||
| 	`\colon`, `:`, | ||||
| 	`\div`, `÷`, | ||||
| 	`\frac12`, `½`, | ||||
| 	`\frac14`, `¼`, | ||||
| 	`\frac34`, `¾`, | ||||
| 	`\permil`, `‰`, | ||||
| 	`\sup1`, `¹`, | ||||
| 	`\sup2`, `²`, | ||||
| 	`\sup3`, `³`, | ||||
| 	`\radic`, `√`, | ||||
| 	`\sum`, `∑`, | ||||
| 	`\prod`, `∏`, | ||||
| 	`\micro`, `µ`, | ||||
| 	`\macr`, `¯`, | ||||
| 	`\deg`, `°`, | ||||
| 	`\prime`, `′`, | ||||
| 	`\Prime`, `″`, | ||||
| 	`\infin`, `∞`, | ||||
| 	`\infty`, `∞`, | ||||
| 	`\prop`, `∝`, | ||||
| 	`\propto`, `∝`, | ||||
| 	`\not`, `¬`, | ||||
| 	`\neg`, `¬`, | ||||
| 	`\land`, `∧`, | ||||
| 	`\wedge`, `∧`, | ||||
| 	`\lor`, `∨`, | ||||
| 	`\vee`, `∨`, | ||||
| 	`\cap`, `∩`, | ||||
| 	`\cup`, `∪`, | ||||
| 	`\smile`, `⌣`, | ||||
| 	`\frown`, `⌢`, | ||||
| 	`\int`, `∫`, | ||||
| 	`\therefore`, `∴`, | ||||
| 	`\there4`, `∴`, | ||||
| 	`\because`, `∵`, | ||||
| 	`\sim`, `∼`, | ||||
| 	`\cong`, `≅`, | ||||
| 	`\simeq`, `≅`, | ||||
| 	`\asymp`, `≈`, | ||||
| 	`\approx`, `≈`, | ||||
| 	`\ne`, `≠`, | ||||
| 	`\neq`, `≠`, | ||||
| 	`\equiv`, `≡`, | ||||
| 	`\triangleq`, `≜`, | ||||
| 	`\le`, `≤`, | ||||
| 	`\leq`, `≤`, | ||||
| 	`\ge`, `≥`, | ||||
| 	`\geq`, `≥`, | ||||
| 	`\lessgtr`, `≶`, | ||||
| 	`\lesseqgtr`, `⋚`, | ||||
| 	`\ll`, `≪`, | ||||
| 	`\Ll`, `⋘`, | ||||
| 	`\lll`, `⋘`, | ||||
| 	`\gg`, `≫`, | ||||
| 	`\Gg`, `⋙`, | ||||
| 	`\ggg`, `⋙`, | ||||
| 	`\prec`, `≺`, | ||||
| 	`\preceq`, `≼`, | ||||
| 	`\preccurlyeq`, `≼`, | ||||
| 	`\succ`, `≻`, | ||||
| 	`\succeq`, `≽`, | ||||
| 	`\succcurlyeq`, `≽`, | ||||
| 	`\sub`, `⊂`, | ||||
| 	`\subset`, `⊂`, | ||||
| 	`\sup`, `⊃`, | ||||
| 	`\supset`, `⊃`, | ||||
| 	`\nsub`, `⊄`, | ||||
| 	`\sube`, `⊆`, | ||||
| 	`\nsup`, `⊅`, | ||||
| 	`\supe`, `⊇`, | ||||
| 	`\setminus`, `⧵`, | ||||
| 	`\forall`, `∀`, | ||||
| 	`\exist`, `∃`, | ||||
| 	`\exists`, `∃`, | ||||
| 	`\nexist`, `∄`, | ||||
| 	`\nexists`, `∄`, | ||||
| 	`\empty`, `∅`, | ||||
| 	`\emptyset`, `∅`, | ||||
| 	`\isin`, `∈`, | ||||
| 	`\in`, `∈`, | ||||
| 	`\notin`, `∉`, | ||||
| 	`\ni`, `∋`, | ||||
| 	`\nabla`, `∇`, | ||||
| 	`\ang`, `∠`, | ||||
| 	`\angle`, `∠`, | ||||
| 	`\perp`, `⊥`, | ||||
| 	`\parallel`, `∥`, | ||||
| 	`\sdot`, `⋅`, | ||||
| 	`\cdot`, `⋅`, | ||||
| 	`\lceil`, `⌈`, | ||||
| 	`\rceil`, `⌉`, | ||||
| 	`\lfloor`, `⌊`, | ||||
| 	`\rfloor`, `⌋`, | ||||
| 	`\lang`, `⟨`, | ||||
| 	`\rang`, `⟩`, | ||||
| 	`\langle`, `⟨`, | ||||
| 	`\rangle`, `⟩`, | ||||
| 	`\hbar`, `ℏ`, | ||||
| 	`\mho`, `℧`, | ||||
| 	`\larr`, `←`, | ||||
| 	`\leftarrow`, `←`, | ||||
| 	`\gets`, `←`, | ||||
| 	`\lArr`, `⇐`, | ||||
| 	`\Leftarrow`, `⇐`, | ||||
| 	`\uarr`, `↑`, | ||||
| 	`\uparrow`, `↑`, | ||||
| 	`\uArr`, `⇑`, | ||||
| 	`\Uparrow`, `⇑`, | ||||
| 	`\rarr`, `→`, | ||||
| 	`\to`, `→`, | ||||
| 	`\rightarrow`, `→`, | ||||
| 	`\rArr`, `⇒`, | ||||
| 	`\Rightarrow`, `⇒`, | ||||
| 	`\darr`, `↓`, | ||||
| 	`\downarrow`, `↓`, | ||||
| 	`\dArr`, `⇓`, | ||||
| 	`\Downarrow`, `⇓`, | ||||
| 	`\harr`, `↔`, | ||||
| 	`\leftrightarrow`, `↔`, | ||||
| 	`\hArr`, `⇔`, | ||||
| 	`\Leftrightarrow`, `⇔`, | ||||
| 	`\crarr`, `↵`, | ||||
| 	`\hookleftarrow`, `↵`, | ||||
| 	`\arccos`, `arccos`, | ||||
| 	`\arcsin`, `arcsin`, | ||||
| 	`\arctan`, `arctan`, | ||||
| 	`\arg`, `arg`, | ||||
| 	`\cos`, `cos`, | ||||
| 	`\cosh`, `cosh`, | ||||
| 	`\cot`, `cot`, | ||||
| 	`\coth`, `coth`, | ||||
| 	`\csc`, `csc`, | ||||
| 	`\deg`, `deg`, | ||||
| 	`\det`, `det`, | ||||
| 	`\dim`, `dim`, | ||||
| 	`\exp`, `exp`, | ||||
| 	`\gcd`, `gcd`, | ||||
| 	`\hom`, `hom`, | ||||
| 	`\inf`, `inf`, | ||||
| 	`\ker`, `ker`, | ||||
| 	`\lg`, `lg`, | ||||
| 	`\lim`, `lim`, | ||||
| 	`\liminf`, `liminf`, | ||||
| 	`\limsup`, `limsup`, | ||||
| 	`\ln`, `ln`, | ||||
| 	`\log`, `log`, | ||||
| 	`\max`, `max`, | ||||
| 	`\min`, `min`, | ||||
| 	`\Pr`, `Pr`, | ||||
| 	`\sec`, `sec`, | ||||
| 	`\sin`, `sin`, | ||||
| 	`\sinh`, `sinh`, | ||||
| 	`\sup`, `sup`, | ||||
| 	`\tan`, `tan`, | ||||
| 	`\tanh`, `tanh`, | ||||
| 	`\bull`, `•`, | ||||
| 	`\bullet`, `•`, | ||||
| 	`\star`, `⋆`, | ||||
| 	`\lowast`, `∗`, | ||||
| 	`\ast`, `*`, | ||||
| 	`\odot`, `ʘ`, | ||||
| 	`\oplus`, `⊕`, | ||||
| 	`\otimes`, `⊗`, | ||||
| 	`\check`, `✓`, | ||||
| 	`\checkmark`, `✓`, | ||||
| 	`\para`, `¶`, | ||||
| 	`\ordf`, `ª`, | ||||
| 	`\ordm`, `º`, | ||||
| 	`\cedil`, `¸`, | ||||
| 	`\oline`, `‾`, | ||||
| 	`\uml`, `¨`, | ||||
| 	`\zwnj`, ``, | ||||
| 	`\zwj`, ``, | ||||
| 	`\lrm`, ``, | ||||
| 	`\rlm`, ``, | ||||
| 	`\smiley`, `☺`, | ||||
| 	`\blacksmile`, `☻`, | ||||
| 	`\sad`, `☹`, | ||||
| 	`\frowny`, `☹`, | ||||
| 	`\clubs`, `♣`, | ||||
| 	`\clubsuit`, `♣`, | ||||
| 	`\spades`, `♠`, | ||||
| 	`\spadesuit`, `♠`, | ||||
| 	`\hearts`, `♥`, | ||||
| 	`\heartsuit`, `♥`, | ||||
| 	`\diams`, `◆`, | ||||
| 	`\diamondsuit`, `◆`, | ||||
| 	`\diamond`, `◆`, | ||||
| 	`\Diamond`, `◆`, | ||||
| 	`\loz`, `⧫`, | ||||
| 	`\_ `, ` `, | ||||
| 	`\_  `, `  `, | ||||
| 	`\_   `, `   `, | ||||
| 	`\_    `, `    `, | ||||
| 	`\_     `, `     `, | ||||
| 	`\_      `, `      `, | ||||
| 	`\_       `, `       `, | ||||
| 	`\_        `, `        `, | ||||
| 	`\_         `, `         `, | ||||
| 	`\_          `, `          `, | ||||
| 	`\_           `, `           `, | ||||
| 	`\_            `, `            `, | ||||
| 	`\_             `, `             `, | ||||
| 	`\_              `, `              `, | ||||
| 	`\_               `, `               `, | ||||
| 	`\_                `, `                `, | ||||
| 	`\_                 `, `                 `, | ||||
| 	`\_                  `, `                  `, | ||||
| 	`\_                   `, `                   `, | ||||
| 	`\_                    `, `                    `, | ||||
| } | ||||
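The pair list above closes the entity table: each Org entity name (for example `\pm` or `\rarr`) is mapped to its Unicode replacement, and the trailing `\_` entries map escaped space runs to corresponding whitespace. Judging by the flat name/value layout, the table is presumably the argument list of a strings.NewReplacer; the HTML writer below applies it through htmlEntityReplacer.Replace before escaping text. A minimal, self-contained sketch of that pattern with two made-up pairs:

	// Sketch only: how such a pair table plugs into strings.NewReplacer.
	// The real table and the htmlEntityReplacer variable live in the vendored
	// package; this standalone example just mirrors the mechanism.
	package main

	import (
		"fmt"
		"strings"
	)

	var entityReplacer = strings.NewReplacer(
		`\pm`, `±`,
		`\rarr`, `→`,
	)

	func main() {
		fmt.Println(entityReplacer.Replace(`x \pm 1 \rarr y`)) // x ± 1 → y
	}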
							
								
								
									
vendor/github.com/niklasfasching/go-org/org/html_writer.go (504 lines, generated, vendored, new file)
							| @@ -0,0 +1,504 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"html" | ||||
| 	"log" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
|  | ||||
| 	h "golang.org/x/net/html" | ||||
| 	"golang.org/x/net/html/atom" | ||||
| ) | ||||
|  | ||||
| // HTMLWriter exports an org document into a html document. | ||||
| type HTMLWriter struct { | ||||
| 	ExtendingWriter    Writer | ||||
| 	HighlightCodeBlock func(source, lang string) string | ||||
|  | ||||
| 	strings.Builder | ||||
| 	document   *Document | ||||
| 	htmlEscape bool | ||||
| 	log        *log.Logger | ||||
| 	footnotes  *footnotes | ||||
| } | ||||
|  | ||||
| type footnotes struct { | ||||
| 	mapping map[string]int | ||||
| 	list    []*FootnoteDefinition | ||||
| } | ||||
|  | ||||
| var emphasisTags = map[string][]string{ | ||||
| 	"/":   []string{"<em>", "</em>"}, | ||||
| 	"*":   []string{"<strong>", "</strong>"}, | ||||
| 	"+":   []string{"<del>", "</del>"}, | ||||
| 	"~":   []string{"<code>", "</code>"}, | ||||
| 	"=":   []string{`<code class="verbatim">`, "</code>"}, | ||||
| 	"_":   []string{`<span style="text-decoration: underline;">`, "</span>"}, | ||||
| 	"_{}": []string{"<sub>", "</sub>"}, | ||||
| 	"^{}": []string{"<sup>", "</sup>"}, | ||||
| } | ||||
|  | ||||
| var listTags = map[string][]string{ | ||||
| 	"unordered":   []string{"<ul>", "</ul>"}, | ||||
| 	"ordered":     []string{"<ol>", "</ol>"}, | ||||
| 	"descriptive": []string{"<dl>", "</dl>"}, | ||||
| } | ||||
|  | ||||
| var listItemStatuses = map[string]string{ | ||||
| 	" ": "unchecked", | ||||
| 	"-": "indeterminate", | ||||
| 	"X": "checked", | ||||
| } | ||||
|  | ||||
| var cleanHeadlineTitleForHTMLAnchorRegexp = regexp.MustCompile(`</?a[^>]*>`) // nested a tags are not valid HTML | ||||
|  | ||||
| func NewHTMLWriter() *HTMLWriter { | ||||
| 	defaultConfig := New() | ||||
| 	return &HTMLWriter{ | ||||
| 		document:   &Document{Configuration: defaultConfig}, | ||||
| 		log:        defaultConfig.Log, | ||||
| 		htmlEscape: true, | ||||
| 		HighlightCodeBlock: func(source, lang string) string { | ||||
| 			return fmt.Sprintf("<div class=\"highlight\">\n<pre>\n%s\n</pre>\n</div>", html.EscapeString(source)) | ||||
| 		}, | ||||
| 		footnotes: &footnotes{ | ||||
| 			mapping: map[string]int{}, | ||||
| 		}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) emptyClone() *HTMLWriter { | ||||
| 	wcopy := *w | ||||
| 	wcopy.Builder = strings.Builder{} | ||||
| 	return &wcopy | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) nodesAsString(nodes ...Node) string { | ||||
| 	tmp := w.emptyClone() | ||||
| 	WriteNodes(tmp, nodes...) | ||||
| 	return tmp.String() | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriterWithExtensions() Writer { | ||||
| 	if w.ExtendingWriter != nil { | ||||
| 		return w.ExtendingWriter | ||||
| 	} | ||||
| 	return w | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) Before(d *Document) { | ||||
| 	w.document = d | ||||
| 	w.log = d.Log | ||||
| 	w.WriteOutline(d) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) After(d *Document) { | ||||
| 	w.WriteFootnotes(d) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteComment(Comment)               {} | ||||
| func (w *HTMLWriter) WritePropertyDrawer(PropertyDrawer) {} | ||||
|  | ||||
| func (w *HTMLWriter) WriteBlock(b Block) { | ||||
| 	content := "" | ||||
| 	if isRawTextBlock(b.Name) { | ||||
| 		exportWriter := w.emptyClone() | ||||
| 		exportWriter.htmlEscape = false | ||||
| 		WriteNodes(exportWriter, b.Children...) | ||||
| 		content = strings.TrimRightFunc(exportWriter.String(), unicode.IsSpace) | ||||
| 	} else { | ||||
| 		content = w.nodesAsString(b.Children...) | ||||
| 	} | ||||
| 	switch name := b.Name; { | ||||
| 	case name == "SRC": | ||||
| 		lang := "text" | ||||
| 		if len(b.Parameters) >= 1 { | ||||
| 			lang = strings.ToLower(b.Parameters[0]) | ||||
| 		} | ||||
| 		content = w.HighlightCodeBlock(content, lang) | ||||
| 		w.WriteString(fmt.Sprintf("<div class=\"src src-%s\">\n%s\n</div>\n", lang, content)) | ||||
| 	case name == "EXAMPLE": | ||||
| 		w.WriteString(`<pre class="example">` + "\n" + content + "\n</pre>\n") | ||||
| 	case name == "EXPORT" && len(b.Parameters) >= 1 && strings.ToLower(b.Parameters[0]) == "html": | ||||
| 		w.WriteString(content + "\n") | ||||
| 	case name == "QUOTE": | ||||
| 		w.WriteString("<blockquote>\n" + content + "</blockquote>\n") | ||||
| 	case name == "CENTER": | ||||
| 		w.WriteString(`<div class="center-block" style="text-align: center; margin-left: auto; margin-right: auto;">` + "\n") | ||||
| 		w.WriteString(content + "</div>\n") | ||||
| 	default: | ||||
| 		w.WriteString(fmt.Sprintf(`<div class="%s-block">`, strings.ToLower(b.Name)) + "\n") | ||||
| 		w.WriteString(content + "</div>\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteDrawer(d Drawer) { | ||||
| 	WriteNodes(w, d.Children...) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteKeyword(k Keyword) { | ||||
| 	if k.Key == "HTML" { | ||||
| 		w.WriteString(k.Value + "\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteInclude(i Include) { | ||||
| 	WriteNodes(w, i.Resolve()) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteFootnoteDefinition(f FootnoteDefinition) { | ||||
| 	w.footnotes.updateDefinition(f) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteFootnotes(d *Document) { | ||||
| 	if !w.document.GetOption("f") || len(w.footnotes.list) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	w.WriteString(`<div class="footnotes">` + "\n") | ||||
| 	w.WriteString(`<hr class="footnotes-separatator">` + "\n") | ||||
| 	w.WriteString(`<div class="footnote-definitions">` + "\n") | ||||
| 	for i, definition := range w.footnotes.list { | ||||
| 		id := i + 1 | ||||
| 		if definition == nil { | ||||
| 			name := "" | ||||
| 			for k, v := range w.footnotes.mapping { | ||||
| 				if v == i { | ||||
| 					name = k | ||||
| 				} | ||||
| 			} | ||||
| 			w.log.Printf("Missing footnote definition for [fn:%s] (#%d)", name, id) | ||||
| 			continue | ||||
| 		} | ||||
| 		w.WriteString(`<div class="footnote-definition">` + "\n") | ||||
| 		w.WriteString(fmt.Sprintf(`<sup id="footnote-%d"><a href="#footnote-reference-%d">%d</a></sup>`, id, id, id) + "\n") | ||||
| 		w.WriteString(`<div class="footnote-body">` + "\n") | ||||
| 		WriteNodes(w, definition.Children...) | ||||
| 		w.WriteString("</div>\n</div>\n") | ||||
| 	} | ||||
| 	w.WriteString("</div>\n</div>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteOutline(d *Document) { | ||||
| 	if w.document.GetOption("toc") && len(d.Outline.Children) != 0 { | ||||
| 		w.WriteString("<nav>\n<ul>\n") | ||||
| 		for _, section := range d.Outline.Children { | ||||
| 			w.writeSection(section) | ||||
| 		} | ||||
| 		w.WriteString("</ul>\n</nav>\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) writeSection(section *Section) { | ||||
| 	// NOTE: To satisfy hugo ExtractTOC() check we cannot use `<li>\n` here. Doesn't really matter, just a note. | ||||
| 	w.WriteString("<li>") | ||||
| 	h := section.Headline | ||||
| 	title := cleanHeadlineTitleForHTMLAnchorRegexp.ReplaceAllString(w.nodesAsString(h.Title...), "") | ||||
| 	w.WriteString(fmt.Sprintf("<a href=\"#%s\">%s</a>\n", h.ID(), title)) | ||||
| 	if len(section.Children) != 0 { | ||||
| 		w.WriteString("<ul>\n") | ||||
| 		for _, section := range section.Children { | ||||
| 			w.writeSection(section) | ||||
| 		} | ||||
| 		w.WriteString("</ul>\n") | ||||
| 	} | ||||
| 	w.WriteString("</li>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteHeadline(h Headline) { | ||||
| 	for _, excludeTag := range strings.Fields(w.document.Get("EXCLUDE_TAGS")) { | ||||
| 		for _, tag := range h.Tags { | ||||
| 			if excludeTag == tag { | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	w.WriteString(fmt.Sprintf(`<h%d id="%s">`, h.Lvl, h.ID()) + "\n") | ||||
| 	if w.document.GetOption("todo") && h.Status != "" { | ||||
| 		w.WriteString(fmt.Sprintf(`<span class="todo">%s</span>`, h.Status) + "\n") | ||||
| 	} | ||||
| 	if w.document.GetOption("pri") && h.Priority != "" { | ||||
| 		w.WriteString(fmt.Sprintf(`<span class="priority">[%s]</span>`, h.Priority) + "\n") | ||||
| 	} | ||||
|  | ||||
| 	WriteNodes(w, h.Title...) | ||||
| 	if w.document.GetOption("tags") && len(h.Tags) != 0 { | ||||
| 		tags := make([]string, len(h.Tags)) | ||||
| 		for i, tag := range h.Tags { | ||||
| 			tags[i] = fmt.Sprintf(`<span>%s</span>`, tag) | ||||
| 		} | ||||
| 		w.WriteString("   ") | ||||
| 		w.WriteString(fmt.Sprintf(`<span class="tags">%s</span>`, strings.Join(tags, " "))) | ||||
| 	} | ||||
| 	w.WriteString(fmt.Sprintf("\n</h%d>\n", h.Lvl)) | ||||
| 	WriteNodes(w, h.Children...) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteText(t Text) { | ||||
| 	if !w.htmlEscape { | ||||
| 		w.WriteString(t.Content) | ||||
| 	} else if !w.document.GetOption("e") || t.IsRaw { | ||||
| 		w.WriteString(html.EscapeString(t.Content)) | ||||
| 	} else { | ||||
| 		w.WriteString(html.EscapeString(htmlEntityReplacer.Replace(t.Content))) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteEmphasis(e Emphasis) { | ||||
| 	tags, ok := emphasisTags[e.Kind] | ||||
| 	if !ok { | ||||
| 		panic(fmt.Sprintf("bad emphasis %#v", e)) | ||||
| 	} | ||||
| 	w.WriteString(tags[0]) | ||||
| 	WriteNodes(w, e.Content...) | ||||
| 	w.WriteString(tags[1]) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteLatexFragment(l LatexFragment) { | ||||
| 	w.WriteString(l.OpeningPair) | ||||
| 	WriteNodes(w, l.Content...) | ||||
| 	w.WriteString(l.ClosingPair) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteStatisticToken(s StatisticToken) { | ||||
| 	w.WriteString(fmt.Sprintf(`<code class="statistic">[%s]</code>`, s.Content)) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteLineBreak(l LineBreak) { | ||||
| 	w.WriteString(strings.Repeat("\n", l.Count)) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteExplicitLineBreak(l ExplicitLineBreak) { | ||||
| 	w.WriteString("<br>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteFootnoteLink(l FootnoteLink) { | ||||
| 	if !w.document.GetOption("f") { | ||||
| 		return | ||||
| 	} | ||||
| 	i := w.footnotes.add(l) | ||||
| 	id := i + 1 | ||||
| 	w.WriteString(fmt.Sprintf(`<sup class="footnote-reference"><a id="footnote-reference-%d" href="#footnote-%d">%d</a></sup>`, id, id, id)) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteTimestamp(t Timestamp) { | ||||
| 	if !w.document.GetOption("<") { | ||||
| 		return | ||||
| 	} | ||||
| 	w.WriteString(`<span class="timestamp"><`) | ||||
| 	if t.IsDate { | ||||
| 		w.WriteString(t.Time.Format(datestampFormat)) | ||||
| 	} else { | ||||
| 		w.WriteString(t.Time.Format(timestampFormat)) | ||||
| 	} | ||||
| 	if t.Interval != "" { | ||||
| 		w.WriteString(" " + t.Interval) | ||||
| 	} | ||||
| 	w.WriteString(`></span>`) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteRegularLink(l RegularLink) { | ||||
| 	url := html.EscapeString(l.URL) | ||||
| 	if l.Protocol == "file" { | ||||
| 		url = url[len("file:"):] | ||||
| 	} | ||||
| 	description := url | ||||
| 	if l.Description != nil { | ||||
| 		description = w.nodesAsString(l.Description...) | ||||
| 	} | ||||
| 	switch l.Kind() { | ||||
| 	case "image": | ||||
| 		w.WriteString(fmt.Sprintf(`<img src="%s" alt="%s" title="%s" />`, url, description, description)) | ||||
| 	case "video": | ||||
| 		w.WriteString(fmt.Sprintf(`<video src="%s" title="%s">%s</video>`, url, description, description)) | ||||
| 	default: | ||||
| 		w.WriteString(fmt.Sprintf(`<a href="%s">%s</a>`, url, description)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteList(l List) { | ||||
| 	tags, ok := listTags[l.Kind] | ||||
| 	if !ok { | ||||
| 		panic(fmt.Sprintf("bad list kind %#v", l)) | ||||
| 	} | ||||
| 	w.WriteString(tags[0] + "\n") | ||||
| 	WriteNodes(w, l.Items...) | ||||
| 	w.WriteString(tags[1] + "\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteListItem(li ListItem) { | ||||
| 	if li.Status != "" { | ||||
| 		w.WriteString(fmt.Sprintf("<li class=\"%s\">\n", listItemStatuses[li.Status])) | ||||
| 	} else { | ||||
| 		w.WriteString("<li>\n") | ||||
| 	} | ||||
| 	WriteNodes(w, li.Children...) | ||||
| 	w.WriteString("</li>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteDescriptiveListItem(di DescriptiveListItem) { | ||||
| 	if di.Status != "" { | ||||
| 		w.WriteString(fmt.Sprintf("<dt class=\"%s\">\n", listItemStatuses[di.Status])) | ||||
| 	} else { | ||||
| 		w.WriteString("<dt>\n") | ||||
| 	} | ||||
|  | ||||
| 	if len(di.Term) != 0 { | ||||
| 		WriteNodes(w, di.Term...) | ||||
| 	} else { | ||||
| 		w.WriteString("?") | ||||
| 	} | ||||
| 	w.WriteString("\n</dt>\n") | ||||
| 	w.WriteString("<dd>\n") | ||||
| 	WriteNodes(w, di.Details...) | ||||
| 	w.WriteString("</dd>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteParagraph(p Paragraph) { | ||||
| 	if len(p.Children) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	w.WriteString("<p>") | ||||
| 	if _, ok := p.Children[0].(LineBreak); !ok { | ||||
| 		w.WriteString("\n") | ||||
| 	} | ||||
| 	WriteNodes(w, p.Children...) | ||||
| 	w.WriteString("\n</p>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteExample(e Example) { | ||||
| 	w.WriteString(`<pre class="example">` + "\n") | ||||
| 	if len(e.Children) != 0 { | ||||
| 		for _, n := range e.Children { | ||||
| 			WriteNodes(w, n) | ||||
| 			w.WriteString("\n") | ||||
| 		} | ||||
| 	} | ||||
| 	w.WriteString("</pre>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteHorizontalRule(h HorizontalRule) { | ||||
| 	w.WriteString("<hr>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteNodeWithMeta(n NodeWithMeta) { | ||||
| 	out := w.nodesAsString(n.Node) | ||||
| 	if p, ok := n.Node.(Paragraph); ok { | ||||
| 		if len(p.Children) == 1 && isImageOrVideoLink(p.Children[0]) { | ||||
| 			out = w.nodesAsString(p.Children[0]) | ||||
| 		} | ||||
| 	} | ||||
| 	for _, attributes := range n.Meta.HTMLAttributes { | ||||
| 		out = w.withHTMLAttributes(out, attributes...) + "\n" | ||||
| 	} | ||||
| 	if len(n.Meta.Caption) != 0 { | ||||
| 		caption := "" | ||||
| 		for i, ns := range n.Meta.Caption { | ||||
| 			if i != 0 { | ||||
| 				caption += " " | ||||
| 			} | ||||
| 			caption += w.nodesAsString(ns...) | ||||
| 		} | ||||
| 		out = fmt.Sprintf("<figure>\n%s<figcaption>\n%s\n</figcaption>\n</figure>\n", out, caption) | ||||
| 	} | ||||
| 	w.WriteString(out) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteNodeWithName(n NodeWithName) { | ||||
| 	WriteNodes(w, n.Node) | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) WriteTable(t Table) { | ||||
| 	w.WriteString("<table>\n") | ||||
| 	beforeFirstContentRow := true | ||||
| 	for i, row := range t.Rows { | ||||
| 		if row.IsSpecial || len(row.Columns) == 0 { | ||||
| 			continue | ||||
| 		} | ||||
| 		if beforeFirstContentRow { | ||||
| 			beforeFirstContentRow = false | ||||
| 			if i+1 < len(t.Rows) && len(t.Rows[i+1].Columns) == 0 { | ||||
| 				w.WriteString("<thead>\n") | ||||
| 				w.writeTableColumns(row.Columns, "th") | ||||
| 				w.WriteString("</thead>\n<tbody>\n") | ||||
| 				continue | ||||
| 			} else { | ||||
| 				w.WriteString("<tbody>\n") | ||||
| 			} | ||||
| 		} | ||||
| 		w.writeTableColumns(row.Columns, "td") | ||||
| 	} | ||||
| 	w.WriteString("</tbody>\n</table>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) writeTableColumns(columns []Column, tag string) { | ||||
| 	w.WriteString("<tr>\n") | ||||
| 	for _, column := range columns { | ||||
| 		if column.Align == "" { | ||||
| 			w.WriteString(fmt.Sprintf("<%s>", tag)) | ||||
| 		} else { | ||||
| 			w.WriteString(fmt.Sprintf(`<%s class="align-%s">`, tag, column.Align)) | ||||
| 		} | ||||
| 		WriteNodes(w, column.Children...) | ||||
| 		w.WriteString(fmt.Sprintf("</%s>\n", tag)) | ||||
| 	} | ||||
| 	w.WriteString("</tr>\n") | ||||
| } | ||||
|  | ||||
| func (w *HTMLWriter) withHTMLAttributes(input string, kvs ...string) string { | ||||
| 	if len(kvs)%2 != 0 { | ||||
| 		w.log.Printf("withHTMLAttributes: Len of kvs must be even: %#v", kvs) | ||||
| 		return input | ||||
| 	} | ||||
| 	context := &h.Node{Type: h.ElementNode, Data: "body", DataAtom: atom.Body} | ||||
| 	nodes, err := h.ParseFragment(strings.NewReader(strings.TrimSpace(input)), context) | ||||
| 	if err != nil || len(nodes) != 1 { | ||||
| 		w.log.Printf("withHTMLAttributes: Could not extend attributes of %s: %v (%s)", input, nodes, err) | ||||
| 		return input | ||||
| 	} | ||||
| 	out, node := strings.Builder{}, nodes[0] | ||||
| 	for i := 0; i < len(kvs)-1; i += 2 { | ||||
| 		node.Attr = setHTMLAttribute(node.Attr, strings.TrimPrefix(kvs[i], ":"), kvs[i+1]) | ||||
| 	} | ||||
| 	err = h.Render(&out, nodes[0]) | ||||
| 	if err != nil { | ||||
| 		w.log.Printf("withHTMLAttributes: Could not extend attributes of %s: %v (%s)", input, node, err) | ||||
| 		return input | ||||
| 	} | ||||
| 	return out.String() | ||||
| } | ||||
|  | ||||
| func setHTMLAttribute(attributes []h.Attribute, k, v string) []h.Attribute { | ||||
| 	for i, a := range attributes { | ||||
| 		if strings.ToLower(a.Key) == strings.ToLower(k) { | ||||
| 			switch strings.ToLower(k) { | ||||
| 			case "class", "style": | ||||
| 				attributes[i].Val += " " + v | ||||
| 			default: | ||||
| 				attributes[i].Val = v | ||||
| 			} | ||||
| 			return attributes | ||||
| 		} | ||||
| 	} | ||||
| 	return append(attributes, h.Attribute{Namespace: "", Key: k, Val: v}) | ||||
| } | ||||
|  | ||||
| func (fs *footnotes) add(f FootnoteLink) int { | ||||
| 	if i, ok := fs.mapping[f.Name]; ok && f.Name != "" { | ||||
| 		return i | ||||
| 	} | ||||
| 	fs.list = append(fs.list, f.Definition) | ||||
| 	i := len(fs.list) - 1 | ||||
| 	if f.Name != "" { | ||||
| 		fs.mapping[f.Name] = i | ||||
| 	} | ||||
| 	return i | ||||
| } | ||||
|  | ||||
| func (fs *footnotes) updateDefinition(f FootnoteDefinition) { | ||||
| 	if i, ok := fs.mapping[f.Name]; ok { | ||||
| 		fs.list[i] = &f | ||||
| 	} | ||||
| } | ||||
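html_writer.go is the piece the new orgmode renderer drives. The exported HighlightCodeBlock field is the hook a caller overrides to plug in its own syntax highlighting, and Configuration.Parse (used by loadSetupFile further down) produces the Document to render. A rough usage sketch, under the assumption that the package also exposes Document.Write(writer) as its render entry point:

	// Sketch, not Gitea's actual wiring. org.New, Configuration.Parse,
	// NewHTMLWriter and HighlightCodeBlock appear in the vendored code above;
	// Document.Write is assumed.
	package main

	import (
		"fmt"
		"html"
		"strings"

		"github.com/niklasfasching/go-org/org"
	)

	func main() {
		w := org.NewHTMLWriter()
		w.HighlightCodeBlock = func(source, lang string) string {
			// A real caller would run a highlighter here; this just escapes the source.
			return fmt.Sprintf(`<pre class="%s">%s</pre>`, lang, html.EscapeString(source))
		}
		doc := org.New().Parse(strings.NewReader("* Hello\nSome /emphasised/ text."), "example.org")
		out, err := doc.Write(w) // assumed: renders the parsed document with the given writer
		if err != nil {
			panic(err)
		}
		fmt.Println(out)
	}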
							
								
								
									
vendor/github.com/niklasfasching/go-org/org/inline.go (357 lines, generated, vendored, new file)
							| @@ -0,0 +1,357 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"path" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 	"unicode" | ||||
| ) | ||||
|  | ||||
| type Text struct { | ||||
| 	Content string | ||||
| 	IsRaw   bool | ||||
| } | ||||
|  | ||||
| type LineBreak struct{ Count int } | ||||
| type ExplicitLineBreak struct{} | ||||
|  | ||||
| type StatisticToken struct{ Content string } | ||||
|  | ||||
| type Timestamp struct { | ||||
| 	Time     time.Time | ||||
| 	IsDate   bool | ||||
| 	Interval string | ||||
| } | ||||
|  | ||||
| type Emphasis struct { | ||||
| 	Kind    string | ||||
| 	Content []Node | ||||
| } | ||||
|  | ||||
| type LatexFragment struct { | ||||
| 	OpeningPair string | ||||
| 	ClosingPair string | ||||
| 	Content     []Node | ||||
| } | ||||
|  | ||||
| type FootnoteLink struct { | ||||
| 	Name       string | ||||
| 	Definition *FootnoteDefinition | ||||
| } | ||||
|  | ||||
| type RegularLink struct { | ||||
| 	Protocol    string | ||||
| 	Description []Node | ||||
| 	URL         string | ||||
| 	AutoLink    bool | ||||
| } | ||||
|  | ||||
| var validURLCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~:/?#[]@!$&'()*+,;=" | ||||
| var autolinkProtocols = regexp.MustCompile(`^(https?|ftp|file)$`) | ||||
| var imageExtensionRegexp = regexp.MustCompile(`^[.](png|gif|jpe?g|svg|tiff?)$`) | ||||
| var videoExtensionRegexp = regexp.MustCompile(`^[.](webm|mp4)$`) | ||||
|  | ||||
| var subScriptSuperScriptRegexp = regexp.MustCompile(`^([_^]){([^{}]+?)}`) | ||||
| var timestampRegexp = regexp.MustCompile(`^<(\d{4}-\d{2}-\d{2})( [A-Za-z]+)?( \d{2}:\d{2})?( \+\d+[dwmy])?>`) | ||||
| var footnoteRegexp = regexp.MustCompile(`^\[fn:([\w-]*?)(:(.*?))?\]`) | ||||
| var statisticsTokenRegexp = regexp.MustCompile(`^\[(\d+/\d+|\d+%)\]`) | ||||
| var latexFragmentRegexp = regexp.MustCompile(`(?s)^\\begin{(\w+)}(.*)\\end{(\w+)}`) | ||||
|  | ||||
| var timestampFormat = "2006-01-02 Mon 15:04" | ||||
| var datestampFormat = "2006-01-02 Mon" | ||||
|  | ||||
| var latexFragmentPairs = map[string]string{ | ||||
| 	`\(`: `\)`, | ||||
| 	`\[`: `\]`, | ||||
| 	`$$`: `$$`, | ||||
| } | ||||
|  | ||||
| func (d *Document) parseInline(input string) (nodes []Node) { | ||||
| 	previous, current := 0, 0 | ||||
| 	for current < len(input) { | ||||
| 		rewind, consumed, node := 0, 0, (Node)(nil) | ||||
| 		switch input[current] { | ||||
| 		case '^': | ||||
| 			consumed, node = d.parseSubOrSuperScript(input, current) | ||||
| 		case '_': | ||||
| 			consumed, node = d.parseSubScriptOrEmphasis(input, current) | ||||
| 		case '*', '/', '+': | ||||
| 			consumed, node = d.parseEmphasis(input, current, false) | ||||
| 		case '=', '~': | ||||
| 			consumed, node = d.parseEmphasis(input, current, true) | ||||
| 		case '[': | ||||
| 			consumed, node = d.parseOpeningBracket(input, current) | ||||
| 		case '<': | ||||
| 			consumed, node = d.parseTimestamp(input, current) | ||||
| 		case '\\': | ||||
| 			consumed, node = d.parseExplicitLineBreakOrLatexFragment(input, current) | ||||
| 		case '$': | ||||
| 			consumed, node = d.parseLatexFragment(input, current) | ||||
| 		case '\n': | ||||
| 			consumed, node = d.parseLineBreak(input, current) | ||||
| 		case ':': | ||||
| 			rewind, consumed, node = d.parseAutoLink(input, current) | ||||
| 			current -= rewind | ||||
| 		} | ||||
| 		if consumed != 0 { | ||||
| 			if current > previous { | ||||
| 				nodes = append(nodes, Text{input[previous:current], false}) | ||||
| 			} | ||||
| 			if node != nil { | ||||
| 				nodes = append(nodes, node) | ||||
| 			} | ||||
| 			current += consumed | ||||
| 			previous = current | ||||
| 		} else { | ||||
| 			current++ | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if previous < len(input) { | ||||
| 		nodes = append(nodes, Text{input[previous:], false}) | ||||
| 	} | ||||
| 	return nodes | ||||
| } | ||||
|  | ||||
| func (d *Document) parseRawInline(input string) (nodes []Node) { | ||||
| 	previous, current := 0, 0 | ||||
| 	for current < len(input) { | ||||
| 		if input[current] == '\n' { | ||||
| 			consumed, node := d.parseLineBreak(input, current) | ||||
| 			if current > previous { | ||||
| 				nodes = append(nodes, Text{input[previous:current], true}) | ||||
| 			} | ||||
| 			nodes = append(nodes, node) | ||||
| 			current += consumed | ||||
| 			previous = current | ||||
| 		} else { | ||||
| 			current++ | ||||
| 		} | ||||
| 	} | ||||
| 	if previous < len(input) { | ||||
| 		nodes = append(nodes, Text{input[previous:], true}) | ||||
| 	} | ||||
| 	return nodes | ||||
| } | ||||
|  | ||||
| func (d *Document) parseLineBreak(input string, start int) (int, Node) { | ||||
| 	i := start | ||||
| 	for ; i < len(input) && input[i] == '\n'; i++ { | ||||
| 	} | ||||
| 	return i - start, LineBreak{i - start} | ||||
| } | ||||
|  | ||||
| func (d *Document) parseExplicitLineBreakOrLatexFragment(input string, start int) (int, Node) { | ||||
| 	switch { | ||||
| 	case start+2 >= len(input): | ||||
| 	case input[start+1] == '\\' && start != 0 && input[start-1] != '\n': | ||||
| 		for i := start + 2; unicode.IsSpace(rune(input[i])); i++ { | ||||
| 			if i >= len(input) || input[i] == '\n' { | ||||
| 				return i + 1 - start, ExplicitLineBreak{} | ||||
| 			} | ||||
| 		} | ||||
| 	case input[start+1] == '(' || input[start+1] == '[': | ||||
| 		return d.parseLatexFragment(input, start) | ||||
| 	case strings.Index(input[start:], `\begin{`) == 0: | ||||
| 		if m := latexFragmentRegexp.FindStringSubmatch(input[start:]); m != nil { | ||||
| 			if open, content, close := m[1], m[2], m[3]; open == close { | ||||
| 				openingPair, closingPair := `\begin{`+open+`}`, `\end{`+close+`}` | ||||
| 				i := strings.Index(input[start:], closingPair) | ||||
| 				return i + len(closingPair), LatexFragment{openingPair, closingPair, d.parseRawInline(content)} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| func (d *Document) parseLatexFragment(input string, start int) (int, Node) { | ||||
| 	if start+2 >= len(input) { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	openingPair := input[start : start+2] | ||||
| 	closingPair := latexFragmentPairs[openingPair] | ||||
| 	if i := strings.Index(input[start+2:], closingPair); i != -1 { | ||||
| 		content := d.parseRawInline(input[start+2 : start+2+i]) | ||||
| 		return i + 2 + 2, LatexFragment{openingPair, closingPair, content} | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| func (d *Document) parseSubOrSuperScript(input string, start int) (int, Node) { | ||||
| 	if m := subScriptSuperScriptRegexp.FindStringSubmatch(input[start:]); m != nil { | ||||
| 		return len(m[2]) + 3, Emphasis{m[1] + "{}", []Node{Text{m[2], false}}} | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| func (d *Document) parseSubScriptOrEmphasis(input string, start int) (int, Node) { | ||||
| 	if consumed, node := d.parseSubOrSuperScript(input, start); consumed != 0 { | ||||
| 		return consumed, node | ||||
| 	} | ||||
| 	return d.parseEmphasis(input, start, false) | ||||
| } | ||||
|  | ||||
| func (d *Document) parseOpeningBracket(input string, start int) (int, Node) { | ||||
| 	if len(input[start:]) >= 2 && input[start] == '[' && input[start+1] == '[' { | ||||
| 		return d.parseRegularLink(input, start) | ||||
| 	} else if footnoteRegexp.MatchString(input[start:]) { | ||||
| 		return d.parseFootnoteReference(input, start) | ||||
| 	} else if statisticsTokenRegexp.MatchString(input[start:]) { | ||||
| 		return d.parseStatisticToken(input, start) | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| func (d *Document) parseFootnoteReference(input string, start int) (int, Node) { | ||||
| 	if m := footnoteRegexp.FindStringSubmatch(input[start:]); m != nil { | ||||
| 		name, definition := m[1], m[3] | ||||
| 		if name == "" && definition == "" { | ||||
| 			return 0, nil | ||||
| 		} | ||||
| 		link := FootnoteLink{name, nil} | ||||
| 		if definition != "" { | ||||
| 			link.Definition = &FootnoteDefinition{name, []Node{Paragraph{d.parseInline(definition)}}, true} | ||||
| 		} | ||||
| 		return len(m[0]), link | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| func (d *Document) parseStatisticToken(input string, start int) (int, Node) { | ||||
| 	if m := statisticsTokenRegexp.FindStringSubmatch(input[start:]); m != nil { | ||||
| 		return len(m[1]) + 2, StatisticToken{m[1]} | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| func (d *Document) parseAutoLink(input string, start int) (int, int, Node) { | ||||
| 	if !d.AutoLink || start == 0 || len(input[start:]) < 3 || input[start:start+3] != "://" { | ||||
| 		return 0, 0, nil | ||||
| 	} | ||||
| 	protocolStart, protocol := start-1, "" | ||||
| 	for ; protocolStart > 0; protocolStart-- { | ||||
| 		if !unicode.IsLetter(rune(input[protocolStart])) { | ||||
| 			protocolStart++ | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if m := autolinkProtocols.FindStringSubmatch(input[protocolStart:start]); m != nil { | ||||
| 		protocol = m[1] | ||||
| 	} else { | ||||
| 		return 0, 0, nil | ||||
| 	} | ||||
| 	end := start | ||||
| 	for ; end < len(input) && strings.ContainsRune(validURLCharacters, rune(input[end])); end++ { | ||||
| 	} | ||||
| 	path := input[start:end] | ||||
| 	if path == "://" { | ||||
| 		return 0, 0, nil | ||||
| 	} | ||||
| 	return len(protocol), len(path + protocol), RegularLink{protocol, nil, protocol + path, true} | ||||
| } | ||||
|  | ||||
| func (d *Document) parseRegularLink(input string, start int) (int, Node) { | ||||
| 	input = input[start:] | ||||
| 	if len(input) < 3 || input[:2] != "[[" || input[2] == '[' { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	end := strings.Index(input, "]]") | ||||
| 	if end == -1 { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	rawLinkParts := strings.Split(input[2:end], "][") | ||||
| 	description, link := ([]Node)(nil), rawLinkParts[0] | ||||
| 	if len(rawLinkParts) == 2 { | ||||
| 		link, description = rawLinkParts[0], d.parseInline(rawLinkParts[1]) | ||||
| 	} | ||||
| 	if strings.ContainsRune(link, '\n') { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	consumed := end + 2 | ||||
| 	protocol, linkParts := "", strings.SplitN(link, ":", 2) | ||||
| 	if len(linkParts) == 2 { | ||||
| 		protocol = linkParts[0] | ||||
| 	} | ||||
| 	return consumed, RegularLink{protocol, description, link, false} | ||||
| } | ||||
|  | ||||
| func (d *Document) parseTimestamp(input string, start int) (int, Node) { | ||||
| 	if m := timestampRegexp.FindStringSubmatch(input[start:]); m != nil { | ||||
| 		ddmmyy, hhmm, interval, isDate := m[1], m[3], strings.TrimSpace(m[4]), false | ||||
| 		if hhmm == "" { | ||||
| 			hhmm, isDate = "00:00", true | ||||
| 		} | ||||
| 		t, err := time.Parse(timestampFormat, fmt.Sprintf("%s Mon %s", ddmmyy, hhmm)) | ||||
| 		if err != nil { | ||||
| 			return 0, nil | ||||
| 		} | ||||
| 		timestamp := Timestamp{t, isDate, interval} | ||||
| 		return len(m[0]), timestamp | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| func (d *Document) parseEmphasis(input string, start int, isRaw bool) (int, Node) { | ||||
| 	marker, i := input[start], start | ||||
| 	if !hasValidPreAndBorderChars(input, i) { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	for i, consumedNewLines := i+1, 0; i < len(input) && consumedNewLines <= d.MaxEmphasisNewLines; i++ { | ||||
| 		if input[i] == '\n' { | ||||
| 			consumedNewLines++ | ||||
| 		} | ||||
|  | ||||
| 		if input[i] == marker && i != start+1 && hasValidPostAndBorderChars(input, i) { | ||||
| 			if isRaw { | ||||
| 				return i + 1 - start, Emphasis{input[start : start+1], d.parseRawInline(input[start+1 : i])} | ||||
| 			} | ||||
| 			return i + 1 - start, Emphasis{input[start : start+1], d.parseInline(input[start+1 : i])} | ||||
| 		} | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| // see org-emphasis-regexp-components (emacs elisp variable) | ||||
|  | ||||
| func hasValidPreAndBorderChars(input string, i int) bool { | ||||
| 	return (i+1 >= len(input) || isValidBorderChar(rune(input[i+1]))) && (i == 0 || isValidPreChar(rune(input[i-1]))) | ||||
| } | ||||
|  | ||||
| func hasValidPostAndBorderChars(input string, i int) bool { | ||||
| 	return (i == 0 || isValidBorderChar(rune(input[i-1]))) && (i+1 >= len(input) || isValidPostChar(rune(input[i+1]))) | ||||
| } | ||||
|  | ||||
| func isValidPreChar(r rune) bool { | ||||
| 	return unicode.IsSpace(r) || strings.ContainsRune(`-({'"`, r) | ||||
| } | ||||
|  | ||||
| func isValidPostChar(r rune) bool { | ||||
| 	return unicode.IsSpace(r) || strings.ContainsRune(`-.,:!?;'")}[`, r) | ||||
| } | ||||
|  | ||||
| func isValidBorderChar(r rune) bool { return !unicode.IsSpace(r) } | ||||
|  | ||||
| func (l RegularLink) Kind() string { | ||||
| 	if p := l.Protocol; l.Description != nil || (p != "" && p != "file" && p != "http" && p != "https") { | ||||
| 		return "regular" | ||||
| 	} | ||||
| 	if imageExtensionRegexp.MatchString(path.Ext(l.URL)) { | ||||
| 		return "image" | ||||
| 	} | ||||
| 	if videoExtensionRegexp.MatchString(path.Ext(l.URL)) { | ||||
| 		return "video" | ||||
| 	} | ||||
| 	return "regular" | ||||
| } | ||||
|  | ||||
| func (n Text) String() string              { return orgWriter.nodesAsString(n) } | ||||
| func (n LineBreak) String() string         { return orgWriter.nodesAsString(n) } | ||||
| func (n ExplicitLineBreak) String() string { return orgWriter.nodesAsString(n) } | ||||
| func (n StatisticToken) String() string    { return orgWriter.nodesAsString(n) } | ||||
| func (n Emphasis) String() string          { return orgWriter.nodesAsString(n) } | ||||
| func (n LatexFragment) String() string     { return orgWriter.nodesAsString(n) } | ||||
| func (n FootnoteLink) String() string      { return orgWriter.nodesAsString(n) } | ||||
| func (n RegularLink) String() string       { return orgWriter.nodesAsString(n) } | ||||
| func (n Timestamp) String() string         { return orgWriter.nodesAsString(n) } | ||||
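One detail worth noting in inline.go is Kind() on RegularLink, since the HTML writer above branches on it to decide between <img>, <video> and <a>. A small illustration of the classification, assuming the vendored package is importable under the module path added in go.mod:

	// Sketch: exercising the Kind() classification defined above.
	package main

	import (
		"fmt"

		"github.com/niklasfasching/go-org/org"
	)

	func main() {
		fmt.Println(org.RegularLink{URL: "logo.png"}.Kind())  // image
		fmt.Println(org.RegularLink{URL: "intro.mp4"}.Kind()) // video
		fmt.Println(org.RegularLink{Protocol: "https", URL: "https://gitea.io"}.Kind()) // regular
	}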
							
								
								
									
vendor/github.com/niklasfasching/go-org/org/keyword.go (184 lines, generated, vendored, new file)
							| @@ -0,0 +1,184 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"path/filepath" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| type Comment struct{ Content string } | ||||
|  | ||||
| type Keyword struct { | ||||
| 	Key   string | ||||
| 	Value string | ||||
| } | ||||
|  | ||||
| type NodeWithName struct { | ||||
| 	Name string | ||||
| 	Node Node | ||||
| } | ||||
|  | ||||
| type NodeWithMeta struct { | ||||
| 	Node Node | ||||
| 	Meta Metadata | ||||
| } | ||||
|  | ||||
| type Metadata struct { | ||||
| 	Caption        [][]Node | ||||
| 	HTMLAttributes [][]string | ||||
| } | ||||
|  | ||||
| type Include struct { | ||||
| 	Keyword | ||||
| 	Resolve func() Node | ||||
| } | ||||
|  | ||||
| var keywordRegexp = regexp.MustCompile(`^(\s*)#\+([^:]+):(\s+(.*)|$)`) | ||||
| var commentRegexp = regexp.MustCompile(`^(\s*)#(.*)`) | ||||
|  | ||||
| var includeFileRegexp = regexp.MustCompile(`(?i)^"([^"]+)" (src|example|export) (\w+)$`) | ||||
| var attributeRegexp = regexp.MustCompile(`(?:^|\s+)(:[-\w]+)\s+(.*)$`) | ||||
|  | ||||
| func lexKeywordOrComment(line string) (token, bool) { | ||||
| 	if m := keywordRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"keyword", len(m[1]), m[2], m}, true | ||||
| 	} else if m := commentRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"comment", len(m[1]), m[2], m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func (d *Document) parseComment(i int, stop stopFn) (int, Node) { | ||||
| 	return 1, Comment{d.tokens[i].content} | ||||
| } | ||||
|  | ||||
| func (d *Document) parseKeyword(i int, stop stopFn) (int, Node) { | ||||
| 	k := parseKeyword(d.tokens[i]) | ||||
| 	switch k.Key { | ||||
| 	case "NAME": | ||||
| 		return d.parseNodeWithName(k, i, stop) | ||||
| 	case "SETUPFILE": | ||||
| 		return d.loadSetupFile(k) | ||||
| 	case "INCLUDE": | ||||
| 		return d.parseInclude(k) | ||||
| 	case "CAPTION", "ATTR_HTML": | ||||
| 		consumed, node := d.parseAffiliated(i, stop) | ||||
| 		if consumed != 0 { | ||||
| 			return consumed, node | ||||
| 		} | ||||
| 		fallthrough | ||||
| 	default: | ||||
| 		if _, ok := d.BufferSettings[k.Key]; ok { | ||||
| 			d.BufferSettings[k.Key] = strings.Join([]string{d.BufferSettings[k.Key], k.Value}, "\n") | ||||
| 		} else { | ||||
| 			d.BufferSettings[k.Key] = k.Value | ||||
| 		} | ||||
| 		return 1, k | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (d *Document) parseNodeWithName(k Keyword, i int, stop stopFn) (int, Node) { | ||||
| 	if stop(d, i+1) { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	consumed, node := d.parseOne(i+1, stop) | ||||
| 	if consumed == 0 || node == nil { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	d.NamedNodes[k.Value] = node | ||||
| 	return consumed + 1, NodeWithName{k.Value, node} | ||||
| } | ||||
|  | ||||
| func (d *Document) parseAffiliated(i int, stop stopFn) (int, Node) { | ||||
| 	start, meta := i, Metadata{} | ||||
| 	for ; !stop(d, i) && d.tokens[i].kind == "keyword"; i++ { | ||||
| 		switch k := parseKeyword(d.tokens[i]); k.Key { | ||||
| 		case "CAPTION": | ||||
| 			meta.Caption = append(meta.Caption, d.parseInline(k.Value)) | ||||
| 		case "ATTR_HTML": | ||||
| 			attributes, rest := []string{}, k.Value | ||||
| 			for { | ||||
| 				if k, m := "", attributeRegexp.FindStringSubmatch(rest); m != nil { | ||||
| 					k, rest = m[1], m[2] | ||||
| 					attributes = append(attributes, k) | ||||
| 					if v, m := "", attributeRegexp.FindStringSubmatchIndex(rest); m != nil { | ||||
| 						v, rest = rest[:m[0]], rest[m[0]:] | ||||
| 						attributes = append(attributes, v) | ||||
| 					} else { | ||||
| 						attributes = append(attributes, strings.TrimSpace(rest)) | ||||
| 						break | ||||
| 					} | ||||
| 				} else { | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
| 			meta.HTMLAttributes = append(meta.HTMLAttributes, attributes) | ||||
| 		default: | ||||
| 			return 0, nil | ||||
| 		} | ||||
| 	} | ||||
| 	if stop(d, i) { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	consumed, node := d.parseOne(i, stop) | ||||
| 	if consumed == 0 || node == nil { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	i += consumed | ||||
| 	return i - start, NodeWithMeta{node, meta} | ||||
| } | ||||
|  | ||||
| func parseKeyword(t token) Keyword { | ||||
| 	k, v := t.matches[2], t.matches[4] | ||||
| 	return Keyword{strings.ToUpper(k), strings.TrimSpace(v)} | ||||
| } | ||||
|  | ||||
| func (d *Document) parseInclude(k Keyword) (int, Node) { | ||||
| 	resolve := func() Node { | ||||
| 		d.Log.Printf("Bad include %#v", k) | ||||
| 		return k | ||||
| 	} | ||||
| 	if m := includeFileRegexp.FindStringSubmatch(k.Value); m != nil { | ||||
| 		path, kind, lang := m[1], m[2], m[3] | ||||
| 		if !filepath.IsAbs(path) { | ||||
| 			path = filepath.Join(filepath.Dir(d.Path), path) | ||||
| 		} | ||||
| 		resolve = func() Node { | ||||
| 			bs, err := d.ReadFile(path) | ||||
| 			if err != nil { | ||||
| 				d.Log.Printf("Bad include %#v: %s", k, err) | ||||
| 				return k | ||||
| 			} | ||||
| 			return Block{strings.ToUpper(kind), []string{lang}, d.parseRawInline(string(bs))} | ||||
| 		} | ||||
| 	} | ||||
| 	return 1, Include{k, resolve} | ||||
| } | ||||
|  | ||||
| func (d *Document) loadSetupFile(k Keyword) (int, Node) { | ||||
| 	path := k.Value | ||||
| 	if !filepath.IsAbs(path) { | ||||
| 		path = filepath.Join(filepath.Dir(d.Path), path) | ||||
| 	} | ||||
| 	bs, err := d.ReadFile(path) | ||||
| 	if err != nil { | ||||
| 		d.Log.Printf("Bad setup file: %#v: %s", k, err) | ||||
| 		return 1, k | ||||
| 	} | ||||
| 	setupDocument := d.Configuration.Parse(bytes.NewReader(bs), path) | ||||
| 	if err := setupDocument.Error; err != nil { | ||||
| 		d.Log.Printf("Bad setup file: %#v: %s", k, err) | ||||
| 		return 1, k | ||||
| 	} | ||||
| 	for k, v := range setupDocument.BufferSettings { | ||||
| 		d.BufferSettings[k] = v | ||||
| 	} | ||||
| 	return 1, k | ||||
| } | ||||
|  | ||||
| func (n Comment) String() string      { return orgWriter.nodesAsString(n) } | ||||
| func (n Keyword) String() string      { return orgWriter.nodesAsString(n) } | ||||
| func (n NodeWithMeta) String() string { return orgWriter.nodesAsString(n) } | ||||
| func (n NodeWithName) String() string { return orgWriter.nodesAsString(n) } | ||||
| func (n Include) String() string      { return orgWriter.nodesAsString(n) } | ||||
							
								
								
									
vendor/github.com/niklasfasching/go-org/org/list.go (114 lines, generated, vendored, new file)
							| @@ -0,0 +1,114 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| ) | ||||
|  | ||||
| type List struct { | ||||
| 	Kind  string | ||||
| 	Items []Node | ||||
| } | ||||
|  | ||||
| type ListItem struct { | ||||
| 	Bullet   string | ||||
| 	Status   string | ||||
| 	Children []Node | ||||
| } | ||||
|  | ||||
| type DescriptiveListItem struct { | ||||
| 	Bullet  string | ||||
| 	Status  string | ||||
| 	Term    []Node | ||||
| 	Details []Node | ||||
| } | ||||
|  | ||||
| var unorderedListRegexp = regexp.MustCompile(`^(\s*)([+*-])(\s+(.*)|$)`) | ||||
| var orderedListRegexp = regexp.MustCompile(`^(\s*)(([0-9]+|[a-zA-Z])[.)])(\s+(.*)|$)`) | ||||
| var descriptiveListItemRegexp = regexp.MustCompile(`\s::(\s|$)`) | ||||
| var listItemStatusRegexp = regexp.MustCompile(`\[( |X|-)\]\s`) | ||||
|  | ||||
| func lexList(line string) (token, bool) { | ||||
| 	if m := unorderedListRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"unorderedList", len(m[1]), m[4], m}, true | ||||
| 	} else if m := orderedListRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"orderedList", len(m[1]), m[5], m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func isListToken(t token) bool { | ||||
| 	return t.kind == "unorderedList" || t.kind == "orderedList" | ||||
| } | ||||
|  | ||||
| func listKind(t token) (string, string) { | ||||
| 	kind := "" | ||||
| 	switch bullet := t.matches[2]; { | ||||
| 	case bullet == "*" || bullet == "+" || bullet == "-": | ||||
| 		kind = "unordered" | ||||
| 	case unicode.IsLetter(rune(bullet[0])), unicode.IsDigit(rune(bullet[0])): | ||||
| 		kind = "ordered" | ||||
| 	default: | ||||
| 		panic(fmt.Sprintf("bad list bullet '%s': %#v", bullet, t)) | ||||
| 	} | ||||
| 	if descriptiveListItemRegexp.MatchString(t.content) { | ||||
| 		return kind, "descriptive" | ||||
| 	} | ||||
| 	return kind, kind | ||||
| } | ||||
|  | ||||
| func (d *Document) parseList(i int, parentStop stopFn) (int, Node) { | ||||
| 	start, lvl := i, d.tokens[i].lvl | ||||
| 	listMainKind, kind := listKind(d.tokens[i]) | ||||
| 	list := List{Kind: kind} | ||||
| 	stop := func(*Document, int) bool { | ||||
| 		if parentStop(d, i) || d.tokens[i].lvl != lvl || !isListToken(d.tokens[i]) { | ||||
| 			return true | ||||
| 		} | ||||
| 		itemMainKind, _ := listKind(d.tokens[i]) | ||||
| 		return itemMainKind != listMainKind | ||||
| 	} | ||||
| 	for !stop(d, i) { | ||||
| 		consumed, node := d.parseListItem(list, i, parentStop) | ||||
| 		i += consumed | ||||
| 		list.Items = append(list.Items, node) | ||||
| 	} | ||||
| 	return i - start, list | ||||
| } | ||||
|  | ||||
| func (d *Document) parseListItem(l List, i int, parentStop stopFn) (int, Node) { | ||||
| 	start, nodes, bullet := i, []Node{}, d.tokens[i].matches[2] | ||||
| 	minIndent, dterm, content, status := d.tokens[i].lvl+len(bullet), "", d.tokens[i].content, "" | ||||
| 	if m := listItemStatusRegexp.FindStringSubmatch(content); m != nil { | ||||
| 		status, content = m[1], content[len("[ ] "):] | ||||
| 	} | ||||
| 	if l.Kind == "descriptive" { | ||||
| 		if m := descriptiveListItemRegexp.FindStringIndex(content); m != nil { | ||||
| 			dterm, content = content[:m[0]], content[m[1]:] | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	d.tokens[i] = tokenize(strings.Repeat(" ", minIndent) + content) | ||||
| 	stop := func(d *Document, i int) bool { | ||||
| 		if parentStop(d, i) { | ||||
| 			return true | ||||
| 		} | ||||
| 		t := d.tokens[i] | ||||
| 		return t.lvl < minIndent && !(t.kind == "text" && t.content == "") | ||||
| 	} | ||||
| 	for !stop(d, i) && (i <= start+1 || !isSecondBlankLine(d, i)) { | ||||
| 		consumed, node := d.parseOne(i, stop) | ||||
| 		i += consumed | ||||
| 		nodes = append(nodes, node) | ||||
| 	} | ||||
| 	if l.Kind == "descriptive" { | ||||
| 		return i - start, DescriptiveListItem{bullet, status, d.parseInline(dterm), nodes} | ||||
| 	} | ||||
| 	return i - start, ListItem{bullet, status, nodes} | ||||
| } | ||||
|  | ||||
| func (n List) String() string                { return orgWriter.nodesAsString(n) } | ||||
| func (n ListItem) String() string            { return orgWriter.nodesAsString(n) } | ||||
| func (n DescriptiveListItem) String() string { return orgWriter.nodesAsString(n) } | ||||
							
								
								
									
vendor/github.com/niklasfasching/go-org/org/org_writer.go (334 lines, generated, vendored, new file)
							| @@ -0,0 +1,334 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
| // OrgWriter export an org document into pretty printed org document. | ||||
| type OrgWriter struct { | ||||
| 	ExtendingWriter Writer | ||||
| 	TagsColumn      int | ||||
|  | ||||
| 	strings.Builder | ||||
| 	indent string | ||||
| } | ||||
|  | ||||
| var emphasisOrgBorders = map[string][]string{ | ||||
| 	"_":   []string{"_", "_"}, | ||||
| 	"*":   []string{"*", "*"}, | ||||
| 	"/":   []string{"/", "/"}, | ||||
| 	"+":   []string{"+", "+"}, | ||||
| 	"~":   []string{"~", "~"}, | ||||
| 	"=":   []string{"=", "="}, | ||||
| 	"_{}": []string{"_{", "}"}, | ||||
| 	"^{}": []string{"^{", "}"}, | ||||
| } | ||||
|  | ||||
| func NewOrgWriter() *OrgWriter { | ||||
| 	return &OrgWriter{ | ||||
| 		TagsColumn: 77, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriterWithExtensions() Writer { | ||||
| 	if w.ExtendingWriter != nil { | ||||
| 		return w.ExtendingWriter | ||||
| 	} | ||||
| 	return w | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) Before(d *Document) {} | ||||
| func (w *OrgWriter) After(d *Document)  {} | ||||
|  | ||||
| func (w *OrgWriter) emptyClone() *OrgWriter { | ||||
| 	wcopy := *w | ||||
| 	wcopy.Builder = strings.Builder{} | ||||
| 	return &wcopy | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) nodesAsString(nodes ...Node) string { | ||||
| 	tmp := w.emptyClone() | ||||
| 	WriteNodes(tmp, nodes...) | ||||
| 	return tmp.String() | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteHeadline(h Headline) { | ||||
| 	tmp := w.emptyClone() | ||||
| 	tmp.WriteString(strings.Repeat("*", h.Lvl)) | ||||
| 	if h.Status != "" { | ||||
| 		tmp.WriteString(" " + h.Status) | ||||
| 	} | ||||
| 	if h.Priority != "" { | ||||
| 		tmp.WriteString(" [#" + h.Priority + "]") | ||||
| 	} | ||||
| 	tmp.WriteString(" ") | ||||
| 	WriteNodes(tmp, h.Title...) | ||||
| 	hString := tmp.String() | ||||
| 	if len(h.Tags) != 0 { | ||||
| 		tString := ":" + strings.Join(h.Tags, ":") + ":" | ||||
| 		if n := w.TagsColumn - len(tString) - len(hString); n > 0 { | ||||
| 			w.WriteString(hString + strings.Repeat(" ", n) + tString) | ||||
| 		} else { | ||||
| 			w.WriteString(hString + " " + tString) | ||||
| 		} | ||||
| 	} else { | ||||
| 		w.WriteString(hString) | ||||
| 	} | ||||
| 	w.WriteString("\n") | ||||
| 	if len(h.Children) != 0 { | ||||
| 		w.WriteString(w.indent) | ||||
| 	} | ||||
| 	if h.Properties != nil { | ||||
| 		WriteNodes(w, *h.Properties) | ||||
| 	} | ||||
| 	WriteNodes(w, h.Children...) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteBlock(b Block) { | ||||
| 	w.WriteString(w.indent + "#+BEGIN_" + b.Name) | ||||
| 	if len(b.Parameters) != 0 { | ||||
| 		w.WriteString(" " + strings.Join(b.Parameters, " ")) | ||||
| 	} | ||||
| 	w.WriteString("\n") | ||||
| 	if isRawTextBlock(b.Name) { | ||||
| 		w.WriteString(w.indent) | ||||
| 	} | ||||
| 	WriteNodes(w, b.Children...) | ||||
| 	if !isRawTextBlock(b.Name) { | ||||
| 		w.WriteString(w.indent) | ||||
| 	} | ||||
| 	w.WriteString("#+END_" + b.Name + "\n") | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteDrawer(d Drawer) { | ||||
| 	w.WriteString(w.indent + ":" + d.Name + ":\n") | ||||
| 	WriteNodes(w, d.Children...) | ||||
| 	w.WriteString(w.indent + ":END:\n") | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WritePropertyDrawer(d PropertyDrawer) { | ||||
| 	w.WriteString(":PROPERTIES:\n") | ||||
| 	for _, kvPair := range d.Properties { | ||||
| 		k, v := kvPair[0], kvPair[1] | ||||
| 		if v != "" { | ||||
| 			v = " " + v | ||||
| 		} | ||||
| 		w.WriteString(fmt.Sprintf(":%s:%s\n", k, v)) | ||||
| 	} | ||||
| 	w.WriteString(":END:\n") | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteFootnoteDefinition(f FootnoteDefinition) { | ||||
| 	w.WriteString(fmt.Sprintf("[fn:%s]", f.Name)) | ||||
| 	content := w.nodesAsString(f.Children...) | ||||
| 	if content != "" && !unicode.IsSpace(rune(content[0])) { | ||||
| 		w.WriteString(" ") | ||||
| 	} | ||||
| 	w.WriteString(content) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteParagraph(p Paragraph) { | ||||
| 	content := w.nodesAsString(p.Children...) | ||||
| 	if len(content) > 0 && content[0] != '\n' { | ||||
| 		w.WriteString(w.indent) | ||||
| 	} | ||||
| 	w.WriteString(content + "\n") | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteExample(e Example) { | ||||
| 	for _, n := range e.Children { | ||||
| 		w.WriteString(w.indent + ":") | ||||
| 		if content := w.nodesAsString(n); content != "" { | ||||
| 			w.WriteString(" " + content) | ||||
| 		} | ||||
| 		w.WriteString("\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteKeyword(k Keyword) { | ||||
| 	w.WriteString(w.indent + "#+" + k.Key + ":") | ||||
| 	if k.Value != "" { | ||||
| 		w.WriteString(" " + k.Value) | ||||
| 	} | ||||
| 	w.WriteString("\n") | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteInclude(i Include) { | ||||
| 	w.WriteKeyword(i.Keyword) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteNodeWithMeta(n NodeWithMeta) { | ||||
| 	for _, ns := range n.Meta.Caption { | ||||
| 		w.WriteString("#+CAPTION: ") | ||||
| 		WriteNodes(w, ns...) | ||||
| 		w.WriteString("\n") | ||||
| 	} | ||||
| 	for _, attributes := range n.Meta.HTMLAttributes { | ||||
| 		w.WriteString("#+ATTR_HTML: ") | ||||
| 		w.WriteString(strings.Join(attributes, " ") + "\n") | ||||
| 	} | ||||
| 	WriteNodes(w, n.Node) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteNodeWithName(n NodeWithName) { | ||||
| 	w.WriteString(fmt.Sprintf("#+NAME: %s\n", n.Name)) | ||||
| 	WriteNodes(w, n.Node) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteComment(c Comment) { | ||||
| 	w.WriteString(w.indent + "#" + c.Content + "\n") | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteList(l List) { WriteNodes(w, l.Items...) } | ||||
|  | ||||
| func (w *OrgWriter) WriteListItem(li ListItem) { | ||||
| 	liWriter := w.emptyClone() | ||||
| 	liWriter.indent = w.indent + strings.Repeat(" ", len(li.Bullet)+1) | ||||
| 	WriteNodes(liWriter, li.Children...) | ||||
| 	content := strings.TrimPrefix(liWriter.String(), liWriter.indent) | ||||
| 	w.WriteString(w.indent + li.Bullet) | ||||
| 	if li.Status != "" { | ||||
| 		w.WriteString(fmt.Sprintf(" [%s]", li.Status)) | ||||
| 	} | ||||
| 	if len(content) > 0 && content[0] == '\n' { | ||||
| 		w.WriteString(content) | ||||
| 	} else { | ||||
| 		w.WriteString(" " + content) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteDescriptiveListItem(di DescriptiveListItem) { | ||||
| 	w.WriteString(w.indent + di.Bullet) | ||||
| 	if di.Status != "" { | ||||
| 		w.WriteString(fmt.Sprintf(" [%s]", di.Status)) | ||||
| 	} | ||||
| 	indent := w.indent + strings.Repeat(" ", len(di.Bullet)+1) | ||||
| 	if len(di.Term) != 0 { | ||||
| 		term := w.nodesAsString(di.Term...) | ||||
| 		w.WriteString(" " + term + " ::") | ||||
| 		indent = indent + strings.Repeat(" ", len(term)+4) | ||||
| 	} | ||||
| 	diWriter := w.emptyClone() | ||||
| 	diWriter.indent = indent | ||||
| 	WriteNodes(diWriter, di.Details...) | ||||
| 	details := strings.TrimPrefix(diWriter.String(), diWriter.indent) | ||||
| 	if len(details) > 0 && details[0] == '\n' { | ||||
| 		w.WriteString(details) | ||||
| 	} else { | ||||
| 		w.WriteString(" " + details) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteTable(t Table) { | ||||
| 	for _, row := range t.Rows { | ||||
| 		w.WriteString(w.indent) | ||||
| 		if len(row.Columns) == 0 { | ||||
| 			w.WriteString(`|`) | ||||
| 			for i := 0; i < len(t.ColumnInfos); i++ { | ||||
| 				w.WriteString(strings.Repeat("-", t.ColumnInfos[i].Len+2)) | ||||
| 				if i < len(t.ColumnInfos)-1 { | ||||
| 					w.WriteString("+") | ||||
| 				} | ||||
| 			} | ||||
| 			w.WriteString(`|`) | ||||
|  | ||||
| 		} else { | ||||
| 			w.WriteString(`|`) | ||||
| 			for _, column := range row.Columns { | ||||
| 				w.WriteString(` `) | ||||
| 				content := w.nodesAsString(column.Children...) | ||||
| 				if content == "" { | ||||
| 					content = " " | ||||
| 				} | ||||
| 				n := column.Len - utf8.RuneCountInString(content) | ||||
| 				if n < 0 { | ||||
| 					n = 0 | ||||
| 				} | ||||
| 				if column.Align == "center" { | ||||
| 					if n%2 != 0 { | ||||
| 						w.WriteString(" ") | ||||
| 					} | ||||
| 					w.WriteString(strings.Repeat(" ", n/2) + content + strings.Repeat(" ", n/2)) | ||||
| 				} else if column.Align == "right" { | ||||
| 					w.WriteString(strings.Repeat(" ", n) + content) | ||||
| 				} else { | ||||
| 					w.WriteString(content + strings.Repeat(" ", n)) | ||||
| 				} | ||||
| 				w.WriteString(` |`) | ||||
| 			} | ||||
| 		} | ||||
| 		w.WriteString("\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteHorizontalRule(hr HorizontalRule) { | ||||
| 	w.WriteString(w.indent + "-----\n") | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteText(t Text) { w.WriteString(t.Content) } | ||||
|  | ||||
| func (w *OrgWriter) WriteEmphasis(e Emphasis) { | ||||
| 	borders, ok := emphasisOrgBorders[e.Kind] | ||||
| 	if !ok { | ||||
| 		panic(fmt.Sprintf("bad emphasis %#v", e)) | ||||
| 	} | ||||
| 	w.WriteString(borders[0]) | ||||
| 	WriteNodes(w, e.Content...) | ||||
| 	w.WriteString(borders[1]) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteLatexFragment(l LatexFragment) { | ||||
| 	w.WriteString(l.OpeningPair) | ||||
| 	WriteNodes(w, l.Content...) | ||||
| 	w.WriteString(l.ClosingPair) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteStatisticToken(s StatisticToken) { | ||||
| 	w.WriteString(fmt.Sprintf("[%s]", s.Content)) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteLineBreak(l LineBreak) { | ||||
| 	w.WriteString(strings.Repeat("\n"+w.indent, l.Count)) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteExplicitLineBreak(l ExplicitLineBreak) { | ||||
| 	w.WriteString(`\\` + "\n" + w.indent) | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteTimestamp(t Timestamp) { | ||||
| 	w.WriteString("<") | ||||
| 	if t.IsDate { | ||||
| 		w.WriteString(t.Time.Format(datestampFormat)) | ||||
| 	} else { | ||||
| 		w.WriteString(t.Time.Format(timestampFormat)) | ||||
| 	} | ||||
| 	if t.Interval != "" { | ||||
| 		w.WriteString(" " + t.Interval) | ||||
| 	} | ||||
| 	w.WriteString(">") | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteFootnoteLink(l FootnoteLink) { | ||||
| 	w.WriteString("[fn:" + l.Name) | ||||
| 	if l.Definition != nil { | ||||
| 		w.WriteString(":") | ||||
| 		WriteNodes(w, l.Definition.Children[0].(Paragraph).Children...) | ||||
| 	} | ||||
| 	w.WriteString("]") | ||||
| } | ||||
|  | ||||
| func (w *OrgWriter) WriteRegularLink(l RegularLink) { | ||||
| 	if l.AutoLink { | ||||
| 		w.WriteString(l.URL) | ||||
| 	} else if l.Description == nil { | ||||
| 		w.WriteString(fmt.Sprintf("[[%s]]", l.URL)) | ||||
| 	} else { | ||||
| 		descriptionWriter := w.emptyClone() | ||||
| 		WriteNodes(descriptionWriter, l.Description...) | ||||
| 		description := descriptionWriter.String() | ||||
| 		w.WriteString(fmt.Sprintf("[[%s][%s]]", l.URL, description)) | ||||
| 	} | ||||
| } | ||||
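For orientation, a minimal sketch (not code from this commit) of how the vendored OrgWriter above is typically driven, assuming go-org's exported entry points at this version (org.New, Configuration.Parse, Document.Write, NewOrgWriter): it parses an Org snippet and writes it back out as Org text via the Write* methods defined in this file.

package main

import (
	"fmt"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

func main() {
	src := "* Heading\nSome /emphasized/ text and a [[https://example.com][link]].\n"
	doc := org.New().Parse(strings.NewReader(src), "example.org")
	// Document.Write calls Before, WriteNodes and After on the writer, so every
	// parsed node is dispatched to the corresponding Write* method above.
	out, err := doc.Write(org.NewOrgWriter())
	if err != nil {
		fmt.Println("render error:", err)
		return
	}
	fmt.Print(out)
}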
							
								
								
									
46  vendor/github.com/niklasfasching/go-org/org/paragraph.go  generated  vendored  Normal file
							| @@ -0,0 +1,46 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| type Paragraph struct{ Children []Node } | ||||
| type HorizontalRule struct{} | ||||
|  | ||||
| var horizontalRuleRegexp = regexp.MustCompile(`^(\s*)-{5,}\s*$`) | ||||
| var plainTextRegexp = regexp.MustCompile(`^(\s*)(.*)`) | ||||
|  | ||||
| func lexText(line string) (token, bool) { | ||||
| 	if m := plainTextRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"text", len(m[1]), m[2], m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func lexHorizontalRule(line string) (token, bool) { | ||||
| 	if m := horizontalRuleRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"horizontalRule", len(m[1]), "", m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func (d *Document) parseParagraph(i int, parentStop stopFn) (int, Node) { | ||||
| 	lines, start := []string{d.tokens[i].content}, i | ||||
| 	i++ | ||||
| 	stop := func(d *Document, i int) bool { | ||||
| 		return parentStop(d, i) || d.tokens[i].kind != "text" || d.tokens[i].content == "" | ||||
| 	} | ||||
| 	for ; !stop(d, i); i++ { | ||||
| 		lines = append(lines, d.tokens[i].content) | ||||
| 	} | ||||
| 	consumed := i - start | ||||
| 	return consumed, Paragraph{d.parseInline(strings.Join(lines, "\n"))} | ||||
| } | ||||
|  | ||||
| func (d *Document) parseHorizontalRule(i int, parentStop stopFn) (int, Node) { | ||||
| 	return 1, HorizontalRule{} | ||||
| } | ||||
|  | ||||
| func (n Paragraph) String() string      { return orgWriter.nodesAsString(n) } | ||||
| func (n HorizontalRule) String() string { return orgWriter.nodesAsString(n) } | ||||
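The two regexps above define the whole lexing story for this file: five or more dashes on a line of their own become a horizontal rule, anything else falls through to plain text. A small self-contained sketch (the patterns are copied here only for illustration):

package main

import (
	"fmt"
	"regexp"
)

var (
	horizontalRule = regexp.MustCompile(`^(\s*)-{5,}\s*$`) // copy of horizontalRuleRegexp
	plainText      = regexp.MustCompile(`^(\s*)(.*)`)      // copy of plainTextRegexp
)

func main() {
	for _, line := range []string{"-----", "----", "  --------  ", "plain prose"} {
		if horizontalRule.MatchString(line) {
			fmt.Printf("%-14q -> horizontalRule\n", line)
		} else if m := plainText.FindStringSubmatch(line); m != nil {
			fmt.Printf("%-14q -> text (indent %d)\n", line, len(m[1]))
		}
	}
}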
							
								
								
									
130  vendor/github.com/niklasfasching/go-org/org/table.go  generated  vendored  Normal file
							| @@ -0,0 +1,130 @@ | ||||
| package org | ||||
|  | ||||
| import ( | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
| type Table struct { | ||||
| 	Rows        []Row | ||||
| 	ColumnInfos []ColumnInfo | ||||
| } | ||||
|  | ||||
| type Row struct { | ||||
| 	Columns   []Column | ||||
| 	IsSpecial bool | ||||
| } | ||||
|  | ||||
| type Column struct { | ||||
| 	Children []Node | ||||
| 	*ColumnInfo | ||||
| } | ||||
|  | ||||
| type ColumnInfo struct { | ||||
| 	Align string | ||||
| 	Len   int | ||||
| } | ||||
|  | ||||
| var tableSeparatorRegexp = regexp.MustCompile(`^(\s*)(\|[+-|]*)\s*$`) | ||||
| var tableRowRegexp = regexp.MustCompile(`^(\s*)(\|.*)`) | ||||
|  | ||||
| var columnAlignRegexp = regexp.MustCompile(`^<(l|c|r)>$`) | ||||
|  | ||||
| func lexTable(line string) (token, bool) { | ||||
| 	if m := tableSeparatorRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"tableSeparator", len(m[1]), m[2], m}, true | ||||
| 	} else if m := tableRowRegexp.FindStringSubmatch(line); m != nil { | ||||
| 		return token{"tableRow", len(m[1]), m[2], m}, true | ||||
| 	} | ||||
| 	return nilToken, false | ||||
| } | ||||
|  | ||||
| func (d *Document) parseTable(i int, parentStop stopFn) (int, Node) { | ||||
| 	rawRows, start := [][]string{}, i | ||||
| 	for ; !parentStop(d, i); i++ { | ||||
| 		if t := d.tokens[i]; t.kind == "tableRow" { | ||||
| 			rawRow := strings.FieldsFunc(d.tokens[i].content, func(r rune) bool { return r == '|' }) | ||||
| 			for i := range rawRow { | ||||
| 				rawRow[i] = strings.TrimSpace(rawRow[i]) | ||||
| 			} | ||||
| 			rawRows = append(rawRows, rawRow) | ||||
| 		} else if t.kind == "tableSeparator" { | ||||
| 			rawRows = append(rawRows, nil) | ||||
| 		} else { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	table := Table{nil, getColumnInfos(rawRows)} | ||||
| 	for _, rawColumns := range rawRows { | ||||
| 		row := Row{nil, isSpecialRow(rawColumns)} | ||||
| 		if len(rawColumns) != 0 { | ||||
| 			for i := range table.ColumnInfos { | ||||
| 				column := Column{nil, &table.ColumnInfos[i]} | ||||
| 				if i < len(rawColumns) { | ||||
| 					column.Children = d.parseInline(rawColumns[i]) | ||||
| 				} | ||||
| 				row.Columns = append(row.Columns, column) | ||||
| 			} | ||||
| 		} | ||||
| 		table.Rows = append(table.Rows, row) | ||||
| 	} | ||||
| 	return i - start, table | ||||
| } | ||||
|  | ||||
| func getColumnInfos(rows [][]string) []ColumnInfo { | ||||
| 	columnCount := 0 | ||||
| 	for _, columns := range rows { | ||||
| 		if n := len(columns); n > columnCount { | ||||
| 			columnCount = n | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	columnInfos := make([]ColumnInfo, columnCount) | ||||
| 	for i := 0; i < columnCount; i++ { | ||||
| 		countNumeric, countNonNumeric := 0, 0 | ||||
| 		for _, columns := range rows { | ||||
| 			if i >= len(columns) { | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			if n := utf8.RuneCountInString(columns[i]); n > columnInfos[i].Len { | ||||
| 				columnInfos[i].Len = n | ||||
| 			} | ||||
|  | ||||
| 			if m := columnAlignRegexp.FindStringSubmatch(columns[i]); m != nil && isSpecialRow(columns) { | ||||
| 				switch m[1] { | ||||
| 				case "l": | ||||
| 					columnInfos[i].Align = "left" | ||||
| 				case "c": | ||||
| 					columnInfos[i].Align = "center" | ||||
| 				case "r": | ||||
| 					columnInfos[i].Align = "right" | ||||
| 				} | ||||
| 			} else if _, err := strconv.ParseFloat(columns[i], 32); err == nil { | ||||
| 				countNumeric++ | ||||
| 			} else if strings.TrimSpace(columns[i]) != "" { | ||||
| 				countNonNumeric++ | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if columnInfos[i].Align == "" && countNumeric >= countNonNumeric { | ||||
| 			columnInfos[i].Align = "right" | ||||
| 		} | ||||
| 	} | ||||
| 	return columnInfos | ||||
| } | ||||
|  | ||||
| func isSpecialRow(rawColumns []string) bool { | ||||
| 	isAlignRow := true | ||||
| 	for _, rawColumn := range rawColumns { | ||||
| 		if !columnAlignRegexp.MatchString(rawColumn) && rawColumn != "" { | ||||
| 			isAlignRow = false | ||||
| 		} | ||||
| 	} | ||||
| 	return isAlignRow | ||||
| } | ||||
|  | ||||
| func (n Table) String() string { return orgWriter.nodesAsString(n) } | ||||
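To see what the alignment detection above acts on: a row made up only of <l>/<c>/<r> cells is a special row that fixes per-column alignment, and columns that are mostly numeric default to right alignment. A hedged sketch using go-org's public HTML writer (assumed API, not code from this commit):

package main

import (
	"fmt"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

func main() {
	src := strings.Join([]string{
		"| Name | Count | Note  |",
		"|------+-------+-------|",
		"| <l>  | <r>   | <c>   |", // special row: left, right, center
		"| foo  | 10    | hello |",
		"| bar  | 2     | world |",
		"",
	}, "\n")
	doc := org.New().Parse(strings.NewReader(src), "")
	html, err := doc.Write(org.NewHTMLWriter())
	if err != nil {
		fmt.Println("render error:", err)
		return
	}
	fmt.Print(html)
}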
							
								
								
									
19  vendor/github.com/niklasfasching/go-org/org/util.go  generated  vendored  Normal file
							| @@ -0,0 +1,19 @@ | ||||
| package org | ||||
|  | ||||
| func isSecondBlankLine(d *Document, i int) bool { | ||||
| 	if i-1 <= 0 { | ||||
| 		return false | ||||
| 	} | ||||
| 	t1, t2 := d.tokens[i-1], d.tokens[i] | ||||
| 	if t1.kind == "text" && t2.kind == "text" && t1.content == "" && t2.content == "" { | ||||
| 		return true | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func isImageOrVideoLink(n Node) bool { | ||||
| 	if l, ok := n.(RegularLink); ok && l.Kind() == "video" || l.Kind() == "image" { | ||||
| 		return true | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
							
								
								
									
103  vendor/github.com/niklasfasching/go-org/org/writer.go  generated  vendored  Normal file
							| @@ -0,0 +1,103 @@ | ||||
| package org | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| // Writer is the interface that is used to export a parsed document into a new format. See Document.Write(). | ||||
| type Writer interface { | ||||
| 	Before(*Document) // Before is called before any nodes are passed to the writer. | ||||
| 	After(*Document)  // After is called after all nodes have been passed to the writer. | ||||
| 	String() string   // String is called at the very end to retrieve the final output. | ||||
|  | ||||
| 	WriterWithExtensions() Writer | ||||
|  | ||||
| 	WriteKeyword(Keyword) | ||||
| 	WriteInclude(Include) | ||||
| 	WriteComment(Comment) | ||||
| 	WriteNodeWithMeta(NodeWithMeta) | ||||
| 	WriteNodeWithName(NodeWithName) | ||||
| 	WriteHeadline(Headline) | ||||
| 	WriteBlock(Block) | ||||
| 	WriteExample(Example) | ||||
| 	WriteDrawer(Drawer) | ||||
| 	WritePropertyDrawer(PropertyDrawer) | ||||
| 	WriteList(List) | ||||
| 	WriteListItem(ListItem) | ||||
| 	WriteDescriptiveListItem(DescriptiveListItem) | ||||
| 	WriteTable(Table) | ||||
| 	WriteHorizontalRule(HorizontalRule) | ||||
| 	WriteParagraph(Paragraph) | ||||
| 	WriteText(Text) | ||||
| 	WriteEmphasis(Emphasis) | ||||
| 	WriteLatexFragment(LatexFragment) | ||||
| 	WriteStatisticToken(StatisticToken) | ||||
| 	WriteExplicitLineBreak(ExplicitLineBreak) | ||||
| 	WriteLineBreak(LineBreak) | ||||
| 	WriteRegularLink(RegularLink) | ||||
| 	WriteTimestamp(Timestamp) | ||||
| 	WriteFootnoteLink(FootnoteLink) | ||||
| 	WriteFootnoteDefinition(FootnoteDefinition) | ||||
| } | ||||
|  | ||||
| func WriteNodes(w Writer, nodes ...Node) { | ||||
| 	w = w.WriterWithExtensions() | ||||
| 	for _, n := range nodes { | ||||
| 		switch n := n.(type) { | ||||
| 		case Keyword: | ||||
| 			w.WriteKeyword(n) | ||||
| 		case Include: | ||||
| 			w.WriteInclude(n) | ||||
| 		case Comment: | ||||
| 			w.WriteComment(n) | ||||
| 		case NodeWithMeta: | ||||
| 			w.WriteNodeWithMeta(n) | ||||
| 		case NodeWithName: | ||||
| 			w.WriteNodeWithName(n) | ||||
| 		case Headline: | ||||
| 			w.WriteHeadline(n) | ||||
| 		case Block: | ||||
| 			w.WriteBlock(n) | ||||
| 		case Example: | ||||
| 			w.WriteExample(n) | ||||
| 		case Drawer: | ||||
| 			w.WriteDrawer(n) | ||||
| 		case PropertyDrawer: | ||||
| 			w.WritePropertyDrawer(n) | ||||
| 		case List: | ||||
| 			w.WriteList(n) | ||||
| 		case ListItem: | ||||
| 			w.WriteListItem(n) | ||||
| 		case DescriptiveListItem: | ||||
| 			w.WriteDescriptiveListItem(n) | ||||
| 		case Table: | ||||
| 			w.WriteTable(n) | ||||
| 		case HorizontalRule: | ||||
| 			w.WriteHorizontalRule(n) | ||||
| 		case Paragraph: | ||||
| 			w.WriteParagraph(n) | ||||
| 		case Text: | ||||
| 			w.WriteText(n) | ||||
| 		case Emphasis: | ||||
| 			w.WriteEmphasis(n) | ||||
| 		case LatexFragment: | ||||
| 			w.WriteLatexFragment(n) | ||||
| 		case StatisticToken: | ||||
| 			w.WriteStatisticToken(n) | ||||
| 		case ExplicitLineBreak: | ||||
| 			w.WriteExplicitLineBreak(n) | ||||
| 		case LineBreak: | ||||
| 			w.WriteLineBreak(n) | ||||
| 		case RegularLink: | ||||
| 			w.WriteRegularLink(n) | ||||
| 		case Timestamp: | ||||
| 			w.WriteTimestamp(n) | ||||
| 		case FootnoteLink: | ||||
| 			w.WriteFootnoteLink(n) | ||||
| 		case FootnoteDefinition: | ||||
| 			w.WriteFootnoteDefinition(n) | ||||
| 		default: | ||||
| 			if n != nil { | ||||
| 				panic(fmt.Sprintf("bad node %T %#v", n, n)) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
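The WriterWithExtensions hook in the interface above is what lets a consumer override individual node types without reimplementing the whole Writer. A minimal sketch of that pattern (the same one Gitea's orgmode renderer uses), assuming go-org's HTMLWriter exposes an ExtendingWriter field and embeds a strings.Builder at this version:

package main

import (
	"fmt"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

// Renderer embeds the stock HTMLWriter and overrides a single node type;
// WriteNodes calls WriterWithExtensions first, which hands dispatch back to this type.
type Renderer struct {
	*org.HTMLWriter
}

func (r *Renderer) WriteText(t org.Text) {
	r.HTMLWriter.WriteString(strings.ToUpper(t.Content)) // toy override: upper-case plain text
}

func main() {
	htmlWriter := org.NewHTMLWriter()
	renderer := &Renderer{HTMLWriter: htmlWriter}
	htmlWriter.ExtendingWriter = renderer

	doc := org.New().Parse(strings.NewReader("plain text becomes loud\n"), "")
	out, err := doc.Write(renderer)
	if err != nil {
		fmt.Println("render error:", err)
		return
	}
	fmt.Print(out)
}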
							
								
								
									
32  vendor/github.com/russross/blackfriday/doc.go  generated  vendored
							| @@ -1,32 +0,0 @@ | ||||
| // Package blackfriday is a Markdown processor. | ||||
| // | ||||
| // It translates plain text with simple formatting rules into HTML or LaTeX. | ||||
| // | ||||
| // Sanitized Anchor Names | ||||
| // | ||||
| // Blackfriday includes an algorithm for creating sanitized anchor names | ||||
| // corresponding to a given input text. This algorithm is used to create | ||||
| // anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The | ||||
| // algorithm is specified below, so that other packages can create | ||||
| // compatible anchor names and links to those anchors. | ||||
| // | ||||
| // The algorithm iterates over the input text, interpreted as UTF-8, | ||||
| // one Unicode code point (rune) at a time. All runes that are letters (category L) | ||||
| // or numbers (category N) are considered valid characters. They are mapped to | ||||
| // lower case, and included in the output. All other runes are considered | ||||
| // invalid characters. Invalid characters that preceed the first valid character, | ||||
| // as well as invalid character that follow the last valid character | ||||
| // are dropped completely. All other sequences of invalid characters | ||||
| // between two valid characters are replaced with a single dash character '-'. | ||||
| // | ||||
| // SanitizedAnchorName exposes this functionality, and can be used to | ||||
| // create compatible links to the anchor names generated by blackfriday. | ||||
| // This algorithm is also implemented in a small standalone package at | ||||
| // github.com/shurcooL/sanitized_anchor_name. It can be useful for clients | ||||
| // that want a small package and don't need full functionality of blackfriday. | ||||
| package blackfriday | ||||
|  | ||||
| // NOTE: Keep Sanitized Anchor Name algorithm in sync with package | ||||
| //       github.com/shurcooL/sanitized_anchor_name. | ||||
| //       Otherwise, users of sanitized_anchor_name will get anchor names | ||||
| //       that are incompatible with those generated by blackfriday. | ||||
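The removed package comment above describes the anchor-name algorithm precisely enough to restate it in code; a sketch of that description (the same behavior blackfriday exposed as SanitizedAnchorName, written here as a standalone helper):

package main

import (
	"fmt"
	"unicode"
)

// sanitizedAnchorName follows the algorithm from the package comment: letters and
// numbers are lower-cased and kept, every other run of runes between two valid
// characters collapses to a single '-', and leading/trailing invalid runes are dropped.
func sanitizedAnchorName(text string) string {
	var anchor []rune
	pendingDash := false
	for _, r := range text {
		if unicode.IsLetter(r) || unicode.IsNumber(r) {
			if pendingDash && len(anchor) > 0 {
				anchor = append(anchor, '-')
			}
			pendingDash = false
			anchor = append(anchor, unicode.ToLower(r))
		} else {
			pendingDash = true
		}
	}
	return string(anchor)
}

func main() {
	fmt.Println(sanitizedAnchorName("  Header IDs & Anchors!  ")) // header-ids-anchors
}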
							
								
								
									
938  vendor/github.com/russross/blackfriday/html.go  generated  vendored
							| @@ -1,938 +0,0 @@ | ||||
| // | ||||
| // Blackfriday Markdown Processor | ||||
| // Available at http://github.com/russross/blackfriday | ||||
| // | ||||
| // Copyright © 2011 Russ Ross <russ@russross.com>. | ||||
| // Distributed under the Simplified BSD License. | ||||
| // See README.md for details. | ||||
| // | ||||
|  | ||||
| // | ||||
| // | ||||
| // HTML rendering backend | ||||
| // | ||||
| // | ||||
|  | ||||
| package blackfriday | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // Html renderer configuration options. | ||||
| const ( | ||||
| 	HTML_SKIP_HTML                 = 1 << iota // skip preformatted HTML blocks | ||||
| 	HTML_SKIP_STYLE                            // skip embedded <style> elements | ||||
| 	HTML_SKIP_IMAGES                           // skip embedded images | ||||
| 	HTML_SKIP_LINKS                            // skip all links | ||||
| 	HTML_SAFELINK                              // only link to trusted protocols | ||||
| 	HTML_NOFOLLOW_LINKS                        // only link with rel="nofollow" | ||||
| 	HTML_NOREFERRER_LINKS                      // only link with rel="noreferrer" | ||||
| 	HTML_HREF_TARGET_BLANK                     // add a blank target | ||||
| 	HTML_TOC                                   // generate a table of contents | ||||
| 	HTML_OMIT_CONTENTS                         // skip the main contents (for a standalone table of contents) | ||||
| 	HTML_COMPLETE_PAGE                         // generate a complete HTML page | ||||
| 	HTML_USE_XHTML                             // generate XHTML output instead of HTML | ||||
| 	HTML_USE_SMARTYPANTS                       // enable smart punctuation substitutions | ||||
| 	HTML_SMARTYPANTS_FRACTIONS                 // enable smart fractions (with HTML_USE_SMARTYPANTS) | ||||
| 	HTML_SMARTYPANTS_DASHES                    // enable smart dashes (with HTML_USE_SMARTYPANTS) | ||||
| 	HTML_SMARTYPANTS_LATEX_DASHES              // enable LaTeX-style dashes (with HTML_USE_SMARTYPANTS and HTML_SMARTYPANTS_DASHES) | ||||
| 	HTML_SMARTYPANTS_ANGLED_QUOTES             // enable angled double quotes (with HTML_USE_SMARTYPANTS) for double quotes rendering | ||||
| 	HTML_SMARTYPANTS_QUOTES_NBSP               // enable "French guillemets" (with HTML_USE_SMARTYPANTS) | ||||
| 	HTML_FOOTNOTE_RETURN_LINKS                 // generate a link at the end of a footnote to return to the source | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	alignments = []string{ | ||||
| 		"left", | ||||
| 		"right", | ||||
| 		"center", | ||||
| 	} | ||||
|  | ||||
| 	// TODO: improve this regexp to catch all possible entities: | ||||
| 	htmlEntity = regexp.MustCompile(`&[a-z]{2,5};`) | ||||
| ) | ||||
|  | ||||
| type HtmlRendererParameters struct { | ||||
| 	// Prepend this text to each relative URL. | ||||
| 	AbsolutePrefix string | ||||
| 	// Add this text to each footnote anchor, to ensure uniqueness. | ||||
| 	FootnoteAnchorPrefix string | ||||
| 	// Show this text inside the <a> tag for a footnote return link, if the | ||||
| 	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string | ||||
| 	// <sup>[return]</sup> is used. | ||||
| 	FootnoteReturnLinkContents string | ||||
| 	// If set, add this text to the front of each Header ID, to ensure | ||||
| 	// uniqueness. | ||||
| 	HeaderIDPrefix string | ||||
| 	// If set, add this text to the back of each Header ID, to ensure uniqueness. | ||||
| 	HeaderIDSuffix string | ||||
| } | ||||
|  | ||||
| // Html is a type that implements the Renderer interface for HTML output. | ||||
| // | ||||
| // Do not create this directly, instead use the HtmlRenderer function. | ||||
| type Html struct { | ||||
| 	flags    int    // HTML_* options | ||||
| 	closeTag string // how to end singleton tags: either " />" or ">" | ||||
| 	title    string // document title | ||||
| 	css      string // optional css file url (used with HTML_COMPLETE_PAGE) | ||||
|  | ||||
| 	parameters HtmlRendererParameters | ||||
|  | ||||
| 	// table of contents data | ||||
| 	tocMarker    int | ||||
| 	headerCount  int | ||||
| 	currentLevel int | ||||
| 	toc          *bytes.Buffer | ||||
|  | ||||
| 	// Track header IDs to prevent ID collision in a single generation. | ||||
| 	headerIDs map[string]int | ||||
|  | ||||
| 	smartypants *smartypantsRenderer | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	xhtmlClose = " />" | ||||
| 	htmlClose  = ">" | ||||
| ) | ||||
|  | ||||
| // HtmlRenderer creates and configures an Html object, which | ||||
| // satisfies the Renderer interface. | ||||
| // | ||||
| // flags is a set of HTML_* options ORed together. | ||||
| // title is the title of the document, and css is a URL for the document's | ||||
| // stylesheet. | ||||
| // title and css are only used when HTML_COMPLETE_PAGE is selected. | ||||
| func HtmlRenderer(flags int, title string, css string) Renderer { | ||||
| 	return HtmlRendererWithParameters(flags, title, css, HtmlRendererParameters{}) | ||||
| } | ||||
|  | ||||
| func HtmlRendererWithParameters(flags int, title string, | ||||
| 	css string, renderParameters HtmlRendererParameters) Renderer { | ||||
| 	// configure the rendering engine | ||||
| 	closeTag := htmlClose | ||||
| 	if flags&HTML_USE_XHTML != 0 { | ||||
| 		closeTag = xhtmlClose | ||||
| 	} | ||||
|  | ||||
| 	if renderParameters.FootnoteReturnLinkContents == "" { | ||||
| 		renderParameters.FootnoteReturnLinkContents = `<sup>[return]</sup>` | ||||
| 	} | ||||
|  | ||||
| 	return &Html{ | ||||
| 		flags:      flags, | ||||
| 		closeTag:   closeTag, | ||||
| 		title:      title, | ||||
| 		css:        css, | ||||
| 		parameters: renderParameters, | ||||
|  | ||||
| 		headerCount:  0, | ||||
| 		currentLevel: 0, | ||||
| 		toc:          new(bytes.Buffer), | ||||
|  | ||||
| 		headerIDs: make(map[string]int), | ||||
|  | ||||
| 		smartypants: smartypants(flags), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Using if statements is a bit faster than a switch statement. As the compiler | ||||
| // improves, this should become unnecessary; it is only worthwhile because | ||||
| // attrEscape is the single largest CPU user in normal use. | ||||
| // Also tried using map, but that gave a ~3x slowdown. | ||||
| func escapeSingleChar(char byte) (string, bool) { | ||||
| 	if char == '"' { | ||||
| 		return "&quot;", true | ||||
| 	} | ||||
| 	if char == '&' { | ||||
| 		return "&amp;", true | ||||
| 	} | ||||
| 	if char == '<' { | ||||
| 		return "&lt;", true | ||||
| 	} | ||||
| 	if char == '>' { | ||||
| 		return "&gt;", true | ||||
| 	} | ||||
| 	return "", false | ||||
| } | ||||
|  | ||||
| func attrEscape(out *bytes.Buffer, src []byte) { | ||||
| 	org := 0 | ||||
| 	for i, ch := range src { | ||||
| 		if entity, ok := escapeSingleChar(ch); ok { | ||||
| 			if i > org { | ||||
| 				// copy all the normal characters since the last escape | ||||
| 				out.Write(src[org:i]) | ||||
| 			} | ||||
| 			org = i + 1 | ||||
| 			out.WriteString(entity) | ||||
| 		} | ||||
| 	} | ||||
| 	if org < len(src) { | ||||
| 		out.Write(src[org:]) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func entityEscapeWithSkip(out *bytes.Buffer, src []byte, skipRanges [][]int) { | ||||
| 	end := 0 | ||||
| 	for _, rang := range skipRanges { | ||||
| 		attrEscape(out, src[end:rang[0]]) | ||||
| 		out.Write(src[rang[0]:rang[1]]) | ||||
| 		end = rang[1] | ||||
| 	} | ||||
| 	attrEscape(out, src[end:]) | ||||
| } | ||||
|  | ||||
| func (options *Html) GetFlags() int { | ||||
| 	return options.flags | ||||
| } | ||||
|  | ||||
| func (options *Html) TitleBlock(out *bytes.Buffer, text []byte) { | ||||
| 	text = bytes.TrimPrefix(text, []byte("% ")) | ||||
| 	text = bytes.Replace(text, []byte("\n% "), []byte("\n"), -1) | ||||
| 	out.WriteString("<h1 class=\"title\">") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("\n</h1>") | ||||
| } | ||||
|  | ||||
| func (options *Html) Header(out *bytes.Buffer, text func() bool, level int, id string) { | ||||
| 	marker := out.Len() | ||||
| 	doubleSpace(out) | ||||
|  | ||||
| 	if id == "" && options.flags&HTML_TOC != 0 { | ||||
| 		id = fmt.Sprintf("toc_%d", options.headerCount) | ||||
| 	} | ||||
|  | ||||
| 	if id != "" { | ||||
| 		id = options.ensureUniqueHeaderID(id) | ||||
|  | ||||
| 		if options.parameters.HeaderIDPrefix != "" { | ||||
| 			id = options.parameters.HeaderIDPrefix + id | ||||
| 		} | ||||
|  | ||||
| 		if options.parameters.HeaderIDSuffix != "" { | ||||
| 			id = id + options.parameters.HeaderIDSuffix | ||||
| 		} | ||||
|  | ||||
| 		out.WriteString(fmt.Sprintf("<h%d id=\"%s\">", level, id)) | ||||
| 	} else { | ||||
| 		out.WriteString(fmt.Sprintf("<h%d>", level)) | ||||
| 	} | ||||
|  | ||||
| 	tocMarker := out.Len() | ||||
| 	if !text() { | ||||
| 		out.Truncate(marker) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// are we building a table of contents? | ||||
| 	if options.flags&HTML_TOC != 0 { | ||||
| 		options.TocHeaderWithAnchor(out.Bytes()[tocMarker:], level, id) | ||||
| 	} | ||||
|  | ||||
| 	out.WriteString(fmt.Sprintf("</h%d>\n", level)) | ||||
| } | ||||
|  | ||||
| func (options *Html) BlockHtml(out *bytes.Buffer, text []byte) { | ||||
| 	if options.flags&HTML_SKIP_HTML != 0 { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	doubleSpace(out) | ||||
| 	out.Write(text) | ||||
| 	out.WriteByte('\n') | ||||
| } | ||||
|  | ||||
| func (options *Html) HRule(out *bytes.Buffer) { | ||||
| 	doubleSpace(out) | ||||
| 	out.WriteString("<hr") | ||||
| 	out.WriteString(options.closeTag) | ||||
| 	out.WriteByte('\n') | ||||
| } | ||||
|  | ||||
| func (options *Html) BlockCode(out *bytes.Buffer, text []byte, info string) { | ||||
| 	doubleSpace(out) | ||||
|  | ||||
| 	endOfLang := strings.IndexAny(info, "\t ") | ||||
| 	if endOfLang < 0 { | ||||
| 		endOfLang = len(info) | ||||
| 	} | ||||
| 	lang := info[:endOfLang] | ||||
| 	if len(lang) == 0 || lang == "." { | ||||
| 		out.WriteString("<pre><code>") | ||||
| 	} else { | ||||
| 		out.WriteString("<pre><code class=\"language-") | ||||
| 		attrEscape(out, []byte(lang)) | ||||
| 		out.WriteString("\">") | ||||
| 	} | ||||
| 	attrEscape(out, text) | ||||
| 	out.WriteString("</code></pre>\n") | ||||
| } | ||||
|  | ||||
| func (options *Html) BlockQuote(out *bytes.Buffer, text []byte) { | ||||
| 	doubleSpace(out) | ||||
| 	out.WriteString("<blockquote>\n") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("</blockquote>\n") | ||||
| } | ||||
|  | ||||
| func (options *Html) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { | ||||
| 	doubleSpace(out) | ||||
| 	out.WriteString("<table>\n<thead>\n") | ||||
| 	out.Write(header) | ||||
| 	out.WriteString("</thead>\n\n<tbody>\n") | ||||
| 	out.Write(body) | ||||
| 	out.WriteString("</tbody>\n</table>\n") | ||||
| } | ||||
|  | ||||
| func (options *Html) TableRow(out *bytes.Buffer, text []byte) { | ||||
| 	doubleSpace(out) | ||||
| 	out.WriteString("<tr>\n") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("\n</tr>\n") | ||||
| } | ||||
|  | ||||
| func (options *Html) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { | ||||
| 	doubleSpace(out) | ||||
| 	switch align { | ||||
| 	case TABLE_ALIGNMENT_LEFT: | ||||
| 		out.WriteString("<th align=\"left\">") | ||||
| 	case TABLE_ALIGNMENT_RIGHT: | ||||
| 		out.WriteString("<th align=\"right\">") | ||||
| 	case TABLE_ALIGNMENT_CENTER: | ||||
| 		out.WriteString("<th align=\"center\">") | ||||
| 	default: | ||||
| 		out.WriteString("<th>") | ||||
| 	} | ||||
|  | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("</th>") | ||||
| } | ||||
|  | ||||
| func (options *Html) TableCell(out *bytes.Buffer, text []byte, align int) { | ||||
| 	doubleSpace(out) | ||||
| 	switch align { | ||||
| 	case TABLE_ALIGNMENT_LEFT: | ||||
| 		out.WriteString("<td align=\"left\">") | ||||
| 	case TABLE_ALIGNMENT_RIGHT: | ||||
| 		out.WriteString("<td align=\"right\">") | ||||
| 	case TABLE_ALIGNMENT_CENTER: | ||||
| 		out.WriteString("<td align=\"center\">") | ||||
| 	default: | ||||
| 		out.WriteString("<td>") | ||||
| 	} | ||||
|  | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("</td>") | ||||
| } | ||||
|  | ||||
| func (options *Html) Footnotes(out *bytes.Buffer, text func() bool) { | ||||
| 	out.WriteString("<div class=\"footnotes\">\n") | ||||
| 	options.HRule(out) | ||||
| 	options.List(out, text, LIST_TYPE_ORDERED) | ||||
| 	out.WriteString("</div>\n") | ||||
| } | ||||
|  | ||||
| func (options *Html) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { | ||||
| 	if flags&LIST_ITEM_CONTAINS_BLOCK != 0 || flags&LIST_ITEM_BEGINNING_OF_LIST != 0 { | ||||
| 		doubleSpace(out) | ||||
| 	} | ||||
| 	slug := slugify(name) | ||||
| 	out.WriteString(`<li id="`) | ||||
| 	out.WriteString(`fn:`) | ||||
| 	out.WriteString(options.parameters.FootnoteAnchorPrefix) | ||||
| 	out.Write(slug) | ||||
| 	out.WriteString(`">`) | ||||
| 	out.Write(text) | ||||
| 	if options.flags&HTML_FOOTNOTE_RETURN_LINKS != 0 { | ||||
| 		out.WriteString(` <a class="footnote-return" href="#`) | ||||
| 		out.WriteString(`fnref:`) | ||||
| 		out.WriteString(options.parameters.FootnoteAnchorPrefix) | ||||
| 		out.Write(slug) | ||||
| 		out.WriteString(`">`) | ||||
| 		out.WriteString(options.parameters.FootnoteReturnLinkContents) | ||||
| 		out.WriteString(`</a>`) | ||||
| 	} | ||||
| 	out.WriteString("</li>\n") | ||||
| } | ||||
|  | ||||
| func (options *Html) List(out *bytes.Buffer, text func() bool, flags int) { | ||||
| 	marker := out.Len() | ||||
| 	doubleSpace(out) | ||||
|  | ||||
| 	if flags&LIST_TYPE_DEFINITION != 0 { | ||||
| 		out.WriteString("<dl>") | ||||
| 	} else if flags&LIST_TYPE_ORDERED != 0 { | ||||
| 		out.WriteString("<ol>") | ||||
| 	} else { | ||||
| 		out.WriteString("<ul>") | ||||
| 	} | ||||
| 	if !text() { | ||||
| 		out.Truncate(marker) | ||||
| 		return | ||||
| 	} | ||||
| 	if flags&LIST_TYPE_DEFINITION != 0 { | ||||
| 		out.WriteString("</dl>\n") | ||||
| 	} else if flags&LIST_TYPE_ORDERED != 0 { | ||||
| 		out.WriteString("</ol>\n") | ||||
| 	} else { | ||||
| 		out.WriteString("</ul>\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (options *Html) ListItem(out *bytes.Buffer, text []byte, flags int) { | ||||
| 	if (flags&LIST_ITEM_CONTAINS_BLOCK != 0 && flags&LIST_TYPE_DEFINITION == 0) || | ||||
| 		flags&LIST_ITEM_BEGINNING_OF_LIST != 0 { | ||||
| 		doubleSpace(out) | ||||
| 	} | ||||
| 	if flags&LIST_TYPE_TERM != 0 { | ||||
| 		out.WriteString("<dt>") | ||||
| 	} else if flags&LIST_TYPE_DEFINITION != 0 { | ||||
| 		out.WriteString("<dd>") | ||||
| 	} else { | ||||
| 		out.WriteString("<li>") | ||||
| 	} | ||||
| 	out.Write(text) | ||||
| 	if flags&LIST_TYPE_TERM != 0 { | ||||
| 		out.WriteString("</dt>\n") | ||||
| 	} else if flags&LIST_TYPE_DEFINITION != 0 { | ||||
| 		out.WriteString("</dd>\n") | ||||
| 	} else { | ||||
| 		out.WriteString("</li>\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (options *Html) Paragraph(out *bytes.Buffer, text func() bool) { | ||||
| 	marker := out.Len() | ||||
| 	doubleSpace(out) | ||||
|  | ||||
| 	out.WriteString("<p>") | ||||
| 	if !text() { | ||||
| 		out.Truncate(marker) | ||||
| 		return | ||||
| 	} | ||||
| 	out.WriteString("</p>\n") | ||||
| } | ||||
|  | ||||
| func (options *Html) AutoLink(out *bytes.Buffer, link []byte, kind int) { | ||||
| 	skipRanges := htmlEntity.FindAllIndex(link, -1) | ||||
| 	if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) && kind != LINK_TYPE_EMAIL { | ||||
| 		// mark it but don't link it if it is not a safe link: no smartypants | ||||
| 		out.WriteString("<tt>") | ||||
| 		entityEscapeWithSkip(out, link, skipRanges) | ||||
| 		out.WriteString("</tt>") | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	out.WriteString("<a href=\"") | ||||
| 	if kind == LINK_TYPE_EMAIL { | ||||
| 		out.WriteString("mailto:") | ||||
| 	} else { | ||||
| 		options.maybeWriteAbsolutePrefix(out, link) | ||||
| 	} | ||||
|  | ||||
| 	entityEscapeWithSkip(out, link, skipRanges) | ||||
|  | ||||
| 	var relAttrs []string | ||||
| 	if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) { | ||||
| 		relAttrs = append(relAttrs, "nofollow") | ||||
| 	} | ||||
| 	if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) { | ||||
| 		relAttrs = append(relAttrs, "noreferrer") | ||||
| 	} | ||||
| 	if len(relAttrs) > 0 { | ||||
| 		out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " "))) | ||||
| 	} | ||||
|  | ||||
| 	// a blank target is only added to external links | ||||
| 	if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) { | ||||
| 		out.WriteString("\" target=\"_blank") | ||||
| 	} | ||||
|  | ||||
| 	out.WriteString("\">") | ||||
|  | ||||
| 	// Pretty print: if we get an email address as | ||||
| 	// an actual URI, e.g. `mailto:foo@bar.com`, we don't | ||||
| 	// want to print the `mailto:` prefix | ||||
| 	switch { | ||||
| 	case bytes.HasPrefix(link, []byte("mailto://")): | ||||
| 		attrEscape(out, link[len("mailto://"):]) | ||||
| 	case bytes.HasPrefix(link, []byte("mailto:")): | ||||
| 		attrEscape(out, link[len("mailto:"):]) | ||||
| 	default: | ||||
| 		entityEscapeWithSkip(out, link, skipRanges) | ||||
| 	} | ||||
|  | ||||
| 	out.WriteString("</a>") | ||||
| } | ||||
|  | ||||
| func (options *Html) CodeSpan(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("<code>") | ||||
| 	attrEscape(out, text) | ||||
| 	out.WriteString("</code>") | ||||
| } | ||||
|  | ||||
| func (options *Html) DoubleEmphasis(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("<strong>") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("</strong>") | ||||
| } | ||||
|  | ||||
| func (options *Html) Emphasis(out *bytes.Buffer, text []byte) { | ||||
| 	if len(text) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	out.WriteString("<em>") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("</em>") | ||||
| } | ||||
|  | ||||
| func (options *Html) maybeWriteAbsolutePrefix(out *bytes.Buffer, link []byte) { | ||||
| 	if options.parameters.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' { | ||||
| 		out.WriteString(options.parameters.AbsolutePrefix) | ||||
| 		if link[0] != '/' { | ||||
| 			out.WriteByte('/') | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (options *Html) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { | ||||
| 	if options.flags&HTML_SKIP_IMAGES != 0 { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	out.WriteString("<img src=\"") | ||||
| 	options.maybeWriteAbsolutePrefix(out, link) | ||||
| 	attrEscape(out, link) | ||||
| 	out.WriteString("\" alt=\"") | ||||
| 	if len(alt) > 0 { | ||||
| 		attrEscape(out, alt) | ||||
| 	} | ||||
| 	if len(title) > 0 { | ||||
| 		out.WriteString("\" title=\"") | ||||
| 		attrEscape(out, title) | ||||
| 	} | ||||
|  | ||||
| 	out.WriteByte('"') | ||||
| 	out.WriteString(options.closeTag) | ||||
| } | ||||
|  | ||||
| func (options *Html) LineBreak(out *bytes.Buffer) { | ||||
| 	out.WriteString("<br") | ||||
| 	out.WriteString(options.closeTag) | ||||
| 	out.WriteByte('\n') | ||||
| } | ||||
|  | ||||
| func (options *Html) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { | ||||
| 	if options.flags&HTML_SKIP_LINKS != 0 { | ||||
| 		// write the link text out but don't link it, just mark it with typewriter font | ||||
| 		out.WriteString("<tt>") | ||||
| 		attrEscape(out, content) | ||||
| 		out.WriteString("</tt>") | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) { | ||||
| 		// write the link text out but don't link it, just mark it with typewriter font | ||||
| 		out.WriteString("<tt>") | ||||
| 		attrEscape(out, content) | ||||
| 		out.WriteString("</tt>") | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	out.WriteString("<a href=\"") | ||||
| 	options.maybeWriteAbsolutePrefix(out, link) | ||||
| 	attrEscape(out, link) | ||||
| 	if len(title) > 0 { | ||||
| 		out.WriteString("\" title=\"") | ||||
| 		attrEscape(out, title) | ||||
| 	} | ||||
| 	var relAttrs []string | ||||
| 	if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) { | ||||
| 		relAttrs = append(relAttrs, "nofollow") | ||||
| 	} | ||||
| 	if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) { | ||||
| 		relAttrs = append(relAttrs, "noreferrer") | ||||
| 	} | ||||
| 	if len(relAttrs) > 0 { | ||||
| 		out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " "))) | ||||
| 	} | ||||
|  | ||||
| 	// a blank target is only added to external links | ||||
| 	if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) { | ||||
| 		out.WriteString("\" target=\"_blank") | ||||
| 	} | ||||
|  | ||||
| 	out.WriteString("\">") | ||||
| 	out.Write(content) | ||||
| 	out.WriteString("</a>") | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func (options *Html) RawHtmlTag(out *bytes.Buffer, text []byte) { | ||||
| 	if options.flags&HTML_SKIP_HTML != 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	if options.flags&HTML_SKIP_STYLE != 0 && isHtmlTag(text, "style") { | ||||
| 		return | ||||
| 	} | ||||
| 	if options.flags&HTML_SKIP_LINKS != 0 && isHtmlTag(text, "a") { | ||||
| 		return | ||||
| 	} | ||||
| 	if options.flags&HTML_SKIP_IMAGES != 0 && isHtmlTag(text, "img") { | ||||
| 		return | ||||
| 	} | ||||
| 	out.Write(text) | ||||
| } | ||||
|  | ||||
| func (options *Html) TripleEmphasis(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("<strong><em>") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("</em></strong>") | ||||
| } | ||||
|  | ||||
| func (options *Html) StrikeThrough(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("<del>") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("</del>") | ||||
| } | ||||
|  | ||||
| func (options *Html) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { | ||||
| 	slug := slugify(ref) | ||||
| 	out.WriteString(`<sup class="footnote-ref" id="`) | ||||
| 	out.WriteString(`fnref:`) | ||||
| 	out.WriteString(options.parameters.FootnoteAnchorPrefix) | ||||
| 	out.Write(slug) | ||||
| 	out.WriteString(`"><a href="#`) | ||||
| 	out.WriteString(`fn:`) | ||||
| 	out.WriteString(options.parameters.FootnoteAnchorPrefix) | ||||
| 	out.Write(slug) | ||||
| 	out.WriteString(`">`) | ||||
| 	out.WriteString(strconv.Itoa(id)) | ||||
| 	out.WriteString(`</a></sup>`) | ||||
| } | ||||
|  | ||||
| func (options *Html) Entity(out *bytes.Buffer, entity []byte) { | ||||
| 	out.Write(entity) | ||||
| } | ||||
|  | ||||
| func (options *Html) NormalText(out *bytes.Buffer, text []byte) { | ||||
| 	if options.flags&HTML_USE_SMARTYPANTS != 0 { | ||||
| 		options.Smartypants(out, text) | ||||
| 	} else { | ||||
| 		attrEscape(out, text) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (options *Html) Smartypants(out *bytes.Buffer, text []byte) { | ||||
| 	smrt := smartypantsData{false, false} | ||||
|  | ||||
| 	// first do normal entity escaping | ||||
| 	var escaped bytes.Buffer | ||||
| 	attrEscape(&escaped, text) | ||||
| 	text = escaped.Bytes() | ||||
|  | ||||
| 	mark := 0 | ||||
| 	for i := 0; i < len(text); i++ { | ||||
| 		if action := options.smartypants[text[i]]; action != nil { | ||||
| 			if i > mark { | ||||
| 				out.Write(text[mark:i]) | ||||
| 			} | ||||
|  | ||||
| 			previousChar := byte(0) | ||||
| 			if i > 0 { | ||||
| 				previousChar = text[i-1] | ||||
| 			} | ||||
| 			i += action(out, &smrt, previousChar, text[i:]) | ||||
| 			mark = i + 1 | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if mark < len(text) { | ||||
| 		out.Write(text[mark:]) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (options *Html) DocumentHeader(out *bytes.Buffer) { | ||||
| 	if options.flags&HTML_COMPLETE_PAGE == 0 { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	ending := "" | ||||
| 	if options.flags&HTML_USE_XHTML != 0 { | ||||
| 		out.WriteString("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ") | ||||
| 		out.WriteString("\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n") | ||||
| 		out.WriteString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n") | ||||
| 		ending = " /" | ||||
| 	} else { | ||||
| 		out.WriteString("<!DOCTYPE html>\n") | ||||
| 		out.WriteString("<html>\n") | ||||
| 	} | ||||
| 	out.WriteString("<head>\n") | ||||
| 	out.WriteString("  <title>") | ||||
| 	options.NormalText(out, []byte(options.title)) | ||||
| 	out.WriteString("</title>\n") | ||||
| 	out.WriteString("  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v") | ||||
| 	out.WriteString(VERSION) | ||||
| 	out.WriteString("\"") | ||||
| 	out.WriteString(ending) | ||||
| 	out.WriteString(">\n") | ||||
| 	out.WriteString("  <meta charset=\"utf-8\"") | ||||
| 	out.WriteString(ending) | ||||
| 	out.WriteString(">\n") | ||||
| 	if options.css != "" { | ||||
| 		out.WriteString("  <link rel=\"stylesheet\" type=\"text/css\" href=\"") | ||||
| 		attrEscape(out, []byte(options.css)) | ||||
| 		out.WriteString("\"") | ||||
| 		out.WriteString(ending) | ||||
| 		out.WriteString(">\n") | ||||
| 	} | ||||
| 	out.WriteString("</head>\n") | ||||
| 	out.WriteString("<body>\n") | ||||
|  | ||||
| 	options.tocMarker = out.Len() | ||||
| } | ||||
|  | ||||
| func (options *Html) DocumentFooter(out *bytes.Buffer) { | ||||
| 	// finalize and insert the table of contents | ||||
| 	if options.flags&HTML_TOC != 0 { | ||||
| 		options.TocFinalize() | ||||
|  | ||||
| 		// now we have to insert the table of contents into the document | ||||
| 		var temp bytes.Buffer | ||||
|  | ||||
| 		// start by making a copy of everything after the document header | ||||
| 		temp.Write(out.Bytes()[options.tocMarker:]) | ||||
|  | ||||
| 		// now clear the copied material from the main output buffer | ||||
| 		out.Truncate(options.tocMarker) | ||||
|  | ||||
| 		// corner case spacing issue | ||||
| 		if options.flags&HTML_COMPLETE_PAGE != 0 { | ||||
| 			out.WriteByte('\n') | ||||
| 		} | ||||
|  | ||||
| 		// insert the table of contents | ||||
| 		out.WriteString("<nav>\n") | ||||
| 		out.Write(options.toc.Bytes()) | ||||
| 		out.WriteString("</nav>\n") | ||||
|  | ||||
| 		// corner case spacing issue | ||||
| 		if options.flags&HTML_COMPLETE_PAGE == 0 && options.flags&HTML_OMIT_CONTENTS == 0 { | ||||
| 			out.WriteByte('\n') | ||||
| 		} | ||||
|  | ||||
| 		// write out everything that came after it | ||||
| 		if options.flags&HTML_OMIT_CONTENTS == 0 { | ||||
| 			out.Write(temp.Bytes()) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if options.flags&HTML_COMPLETE_PAGE != 0 { | ||||
| 		out.WriteString("\n</body>\n") | ||||
| 		out.WriteString("</html>\n") | ||||
| 	} | ||||
|  | ||||
| } | ||||
|  | ||||
| func (options *Html) TocHeaderWithAnchor(text []byte, level int, anchor string) { | ||||
| 	for level > options.currentLevel { | ||||
| 		switch { | ||||
| 		case bytes.HasSuffix(options.toc.Bytes(), []byte("</li>\n")): | ||||
| 			// this sublist can nest underneath a header | ||||
| 			size := options.toc.Len() | ||||
| 			options.toc.Truncate(size - len("</li>\n")) | ||||
|  | ||||
| 		case options.currentLevel > 0: | ||||
| 			options.toc.WriteString("<li>") | ||||
| 		} | ||||
| 		if options.toc.Len() > 0 { | ||||
| 			options.toc.WriteByte('\n') | ||||
| 		} | ||||
| 		options.toc.WriteString("<ul>\n") | ||||
| 		options.currentLevel++ | ||||
| 	} | ||||
|  | ||||
| 	for level < options.currentLevel { | ||||
| 		options.toc.WriteString("</ul>") | ||||
| 		if options.currentLevel > 1 { | ||||
| 			options.toc.WriteString("</li>\n") | ||||
| 		} | ||||
| 		options.currentLevel-- | ||||
| 	} | ||||
|  | ||||
| 	options.toc.WriteString("<li><a href=\"#") | ||||
| 	if anchor != "" { | ||||
| 		options.toc.WriteString(anchor) | ||||
| 	} else { | ||||
| 		options.toc.WriteString("toc_") | ||||
| 		options.toc.WriteString(strconv.Itoa(options.headerCount)) | ||||
| 	} | ||||
| 	options.toc.WriteString("\">") | ||||
| 	options.headerCount++ | ||||
|  | ||||
| 	options.toc.Write(text) | ||||
|  | ||||
| 	options.toc.WriteString("</a></li>\n") | ||||
| } | ||||
|  | ||||
| func (options *Html) TocHeader(text []byte, level int) { | ||||
| 	options.TocHeaderWithAnchor(text, level, "") | ||||
| } | ||||
|  | ||||
| func (options *Html) TocFinalize() { | ||||
| 	for options.currentLevel > 1 { | ||||
| 		options.toc.WriteString("</ul></li>\n") | ||||
| 		options.currentLevel-- | ||||
| 	} | ||||
|  | ||||
| 	if options.currentLevel > 0 { | ||||
| 		options.toc.WriteString("</ul>\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func isHtmlTag(tag []byte, tagname string) bool { | ||||
| 	found, _ := findHtmlTagPos(tag, tagname) | ||||
| 	return found | ||||
| } | ||||
|  | ||||
| // Look for a character, but ignore it when it's inside any kind of quotes, | ||||
| // since it might be JavaScript | ||||
| func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { | ||||
| 	inSingleQuote := false | ||||
| 	inDoubleQuote := false | ||||
| 	inGraveQuote := false | ||||
| 	i := start | ||||
| 	for i < len(html) { | ||||
| 		switch { | ||||
| 		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: | ||||
| 			return i | ||||
| 		case html[i] == '\'': | ||||
| 			inSingleQuote = !inSingleQuote | ||||
| 		case html[i] == '"': | ||||
| 			inDoubleQuote = !inDoubleQuote | ||||
| 		case html[i] == '`': | ||||
| 			inGraveQuote = !inGraveQuote | ||||
| 		} | ||||
| 		i++ | ||||
| 	} | ||||
| 	return start | ||||
| } | ||||
|  | ||||
| func findHtmlTagPos(tag []byte, tagname string) (bool, int) { | ||||
| 	i := 0 | ||||
| 	if i < len(tag) && tag[0] != '<' { | ||||
| 		return false, -1 | ||||
| 	} | ||||
| 	i++ | ||||
| 	i = skipSpace(tag, i) | ||||
|  | ||||
| 	if i < len(tag) && tag[i] == '/' { | ||||
| 		i++ | ||||
| 	} | ||||
|  | ||||
| 	i = skipSpace(tag, i) | ||||
| 	j := 0 | ||||
| 	for ; i < len(tag); i, j = i+1, j+1 { | ||||
| 		if j >= len(tagname) { | ||||
| 			break | ||||
| 		} | ||||
|  | ||||
| 		if strings.ToLower(string(tag[i]))[0] != tagname[j] { | ||||
| 			return false, -1 | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if i == len(tag) { | ||||
| 		return false, -1 | ||||
| 	} | ||||
|  | ||||
| 	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') | ||||
| 	if rightAngle > i { | ||||
| 		return true, rightAngle | ||||
| 	} | ||||
|  | ||||
| 	return false, -1 | ||||
| } | ||||
|  | ||||
| func skipUntilChar(text []byte, start int, char byte) int { | ||||
| 	i := start | ||||
| 	for i < len(text) && text[i] != char { | ||||
| 		i++ | ||||
| 	} | ||||
| 	return i | ||||
| } | ||||
|  | ||||
| func skipSpace(tag []byte, i int) int { | ||||
| 	for i < len(tag) && isspace(tag[i]) { | ||||
| 		i++ | ||||
| 	} | ||||
| 	return i | ||||
| } | ||||
|  | ||||
| func skipChar(data []byte, start int, char byte) int { | ||||
| 	i := start | ||||
| 	for i < len(data) && data[i] == char { | ||||
| 		i++ | ||||
| 	} | ||||
| 	return i | ||||
| } | ||||
|  | ||||
| func doubleSpace(out *bytes.Buffer) { | ||||
| 	if out.Len() > 0 { | ||||
| 		out.WriteByte('\n') | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func isRelativeLink(link []byte) (yes bool) { | ||||
| 	// an anchor link begins with '#' | ||||
| 	if link[0] == '#' { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	// a link beginning with '/' but not '//' (the latter may be a protocol-relative link) | ||||
| 	if len(link) >= 2 && link[0] == '/' && link[1] != '/' { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	// only the root '/' | ||||
| 	if len(link) == 1 && link[0] == '/' { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	// current directory : begin with "./" | ||||
| 	if bytes.HasPrefix(link, []byte("./")) { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	// parent directory : begin with "../" | ||||
| 	if bytes.HasPrefix(link, []byte("../")) { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (options *Html) ensureUniqueHeaderID(id string) string { | ||||
| 	for count, found := options.headerIDs[id]; found; count, found = options.headerIDs[id] { | ||||
| 		tmp := fmt.Sprintf("%s-%d", id, count+1) | ||||
|  | ||||
| 		if _, tmpFound := options.headerIDs[tmp]; !tmpFound { | ||||
| 			options.headerIDs[id] = count + 1 | ||||
| 			id = tmp | ||||
| 		} else { | ||||
| 			id = id + "-1" | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if _, found := options.headerIDs[id]; !found { | ||||
| 		options.headerIDs[id] = 0 | ||||
| 	} | ||||
|  | ||||
| 	return id | ||||
| } | ||||
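For context on what this removed renderer looked like from the caller's side, a hedged sketch of the blackfriday v1 API it implemented; HtmlRenderer appears above, while Markdown and the EXTENSION_* flags are assumed from the v1 line and are not shown in this diff:

package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	flags := blackfriday.HTML_USE_XHTML |
		blackfriday.HTML_USE_SMARTYPANTS |
		blackfriday.HTML_SMARTYPANTS_FRACTIONS
	extensions := blackfriday.EXTENSION_TABLES |
		blackfriday.EXTENSION_FENCED_CODE |
		blackfriday.EXTENSION_AUTOLINK

	renderer := blackfriday.HtmlRenderer(flags, "", "")
	output := blackfriday.Markdown([]byte("# Hello\n\nSome *markdown*.\n"), renderer, extensions)
	fmt.Print(string(output))
}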
							
								
								
									
334  vendor/github.com/russross/blackfriday/latex.go  generated  vendored
							| @@ -1,334 +0,0 @@ | ||||
| // | ||||
| // Blackfriday Markdown Processor | ||||
| // Available at http://github.com/russross/blackfriday | ||||
| // | ||||
| // Copyright © 2011 Russ Ross <russ@russross.com>. | ||||
| // Distributed under the Simplified BSD License. | ||||
| // See README.md for details. | ||||
| // | ||||
|  | ||||
| // | ||||
| // | ||||
| // LaTeX rendering backend | ||||
| // | ||||
| // | ||||
|  | ||||
| package blackfriday | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // Latex is a type that implements the Renderer interface for LaTeX output. | ||||
| // | ||||
| // Do not create this directly, instead use the LatexRenderer function. | ||||
| type Latex struct { | ||||
| } | ||||
|  | ||||
| // LatexRenderer creates and configures a Latex object, which | ||||
| // satisfies the Renderer interface. | ||||
| // | ||||
| // flags is a set of LATEX_* options ORed together (currently no such options | ||||
| // are defined). | ||||
| func LatexRenderer(flags int) Renderer { | ||||
| 	return &Latex{} | ||||
| } | ||||
|  | ||||
| func (options *Latex) GetFlags() int { | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| // render code chunks using verbatim, or listings if we have a language | ||||
| func (options *Latex) BlockCode(out *bytes.Buffer, text []byte, info string) { | ||||
| 	if info == "" { | ||||
| 		out.WriteString("\n\\begin{verbatim}\n") | ||||
| 	} else { | ||||
| 		lang := strings.Fields(info)[0] | ||||
| 		out.WriteString("\n\\begin{lstlisting}[language=") | ||||
| 		out.WriteString(lang) | ||||
| 		out.WriteString("]\n") | ||||
| 	} | ||||
| 	out.Write(text) | ||||
| 	if info == "" { | ||||
| 		out.WriteString("\n\\end{verbatim}\n") | ||||
| 	} else { | ||||
| 		out.WriteString("\n\\end{lstlisting}\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (options *Latex) TitleBlock(out *bytes.Buffer, text []byte) { | ||||
|  | ||||
| } | ||||
|  | ||||
| func (options *Latex) BlockQuote(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("\n\\begin{quotation}\n") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("\n\\end{quotation}\n") | ||||
| } | ||||
|  | ||||
| func (options *Latex) BlockHtml(out *bytes.Buffer, text []byte) { | ||||
| 	// a pretty lame thing to do... | ||||
| 	out.WriteString("\n\\begin{verbatim}\n") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("\n\\end{verbatim}\n") | ||||
| } | ||||
|  | ||||
| func (options *Latex) Header(out *bytes.Buffer, text func() bool, level int, id string) { | ||||
| 	marker := out.Len() | ||||
|  | ||||
| 	switch level { | ||||
| 	case 1: | ||||
| 		out.WriteString("\n\\section{") | ||||
| 	case 2: | ||||
| 		out.WriteString("\n\\subsection{") | ||||
| 	case 3: | ||||
| 		out.WriteString("\n\\subsubsection{") | ||||
| 	case 4: | ||||
| 		out.WriteString("\n\\paragraph{") | ||||
| 	case 5: | ||||
| 		out.WriteString("\n\\subparagraph{") | ||||
| 	case 6: | ||||
| 		out.WriteString("\n\\textbf{") | ||||
| 	} | ||||
| 	if !text() { | ||||
| 		out.Truncate(marker) | ||||
| 		return | ||||
| 	} | ||||
| 	out.WriteString("}\n") | ||||
| } | ||||
|  | ||||
| func (options *Latex) HRule(out *bytes.Buffer) { | ||||
| 	out.WriteString("\n\\HRule\n") | ||||
| } | ||||
|  | ||||
| func (options *Latex) List(out *bytes.Buffer, text func() bool, flags int) { | ||||
| 	marker := out.Len() | ||||
| 	if flags&LIST_TYPE_ORDERED != 0 { | ||||
| 		out.WriteString("\n\\begin{enumerate}\n") | ||||
| 	} else { | ||||
| 		out.WriteString("\n\\begin{itemize}\n") | ||||
| 	} | ||||
| 	if !text() { | ||||
| 		out.Truncate(marker) | ||||
| 		return | ||||
| 	} | ||||
| 	if flags&LIST_TYPE_ORDERED != 0 { | ||||
| 		out.WriteString("\n\\end{enumerate}\n") | ||||
| 	} else { | ||||
| 		out.WriteString("\n\\end{itemize}\n") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (options *Latex) ListItem(out *bytes.Buffer, text []byte, flags int) { | ||||
| 	out.WriteString("\n\\item ") | ||||
| 	out.Write(text) | ||||
| } | ||||
|  | ||||
| func (options *Latex) Paragraph(out *bytes.Buffer, text func() bool) { | ||||
| 	marker := out.Len() | ||||
| 	out.WriteString("\n") | ||||
| 	if !text() { | ||||
| 		out.Truncate(marker) | ||||
| 		return | ||||
| 	} | ||||
| 	out.WriteString("\n") | ||||
| } | ||||
|  | ||||
| func (options *Latex) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { | ||||
| 	out.WriteString("\n\\begin{tabular}{") | ||||
| 	for _, elt := range columnData { | ||||
| 		switch elt { | ||||
| 		case TABLE_ALIGNMENT_LEFT: | ||||
| 			out.WriteByte('l') | ||||
| 		case TABLE_ALIGNMENT_RIGHT: | ||||
| 			out.WriteByte('r') | ||||
| 		default: | ||||
| 			out.WriteByte('c') | ||||
| 		} | ||||
| 	} | ||||
| 	out.WriteString("}\n") | ||||
| 	out.Write(header) | ||||
| 	out.WriteString(" \\\\\n\\hline\n") | ||||
| 	out.Write(body) | ||||
| 	out.WriteString("\n\\end{tabular}\n") | ||||
| } | ||||
|  | ||||
| func (options *Latex) TableRow(out *bytes.Buffer, text []byte) { | ||||
| 	if out.Len() > 0 { | ||||
| 		out.WriteString(" \\\\\n") | ||||
| 	} | ||||
| 	out.Write(text) | ||||
| } | ||||
|  | ||||
| func (options *Latex) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { | ||||
| 	if out.Len() > 0 { | ||||
| 		out.WriteString(" & ") | ||||
| 	} | ||||
| 	out.Write(text) | ||||
| } | ||||
|  | ||||
| func (options *Latex) TableCell(out *bytes.Buffer, text []byte, align int) { | ||||
| 	if out.Len() > 0 { | ||||
| 		out.WriteString(" & ") | ||||
| 	} | ||||
| 	out.Write(text) | ||||
| } | ||||
|  | ||||
| // TODO: this | ||||
| func (options *Latex) Footnotes(out *bytes.Buffer, text func() bool) { | ||||
|  | ||||
| } | ||||
|  | ||||
| func (options *Latex) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { | ||||
|  | ||||
| } | ||||
|  | ||||
| func (options *Latex) AutoLink(out *bytes.Buffer, link []byte, kind int) { | ||||
| 	out.WriteString("\\href{") | ||||
| 	if kind == LINK_TYPE_EMAIL { | ||||
| 		out.WriteString("mailto:") | ||||
| 	} | ||||
| 	out.Write(link) | ||||
| 	out.WriteString("}{") | ||||
| 	out.Write(link) | ||||
| 	out.WriteString("}") | ||||
| } | ||||
|  | ||||
| func (options *Latex) CodeSpan(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("\\texttt{") | ||||
| 	escapeSpecialChars(out, text) | ||||
| 	out.WriteString("}") | ||||
| } | ||||
|  | ||||
| func (options *Latex) DoubleEmphasis(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("\\textbf{") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("}") | ||||
| } | ||||
|  | ||||
| func (options *Latex) Emphasis(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("\\textit{") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("}") | ||||
| } | ||||
|  | ||||
| func (options *Latex) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { | ||||
| 	if bytes.HasPrefix(link, []byte("http://")) || bytes.HasPrefix(link, []byte("https://")) { | ||||
| 		// treat it like a link | ||||
| 		out.WriteString("\\href{") | ||||
| 		out.Write(link) | ||||
| 		out.WriteString("}{") | ||||
| 		out.Write(alt) | ||||
| 		out.WriteString("}") | ||||
| 	} else { | ||||
| 		out.WriteString("\\includegraphics{") | ||||
| 		out.Write(link) | ||||
| 		out.WriteString("}") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (options *Latex) LineBreak(out *bytes.Buffer) { | ||||
| 	out.WriteString(" \\\\\n") | ||||
| } | ||||
|  | ||||
| func (options *Latex) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { | ||||
| 	out.WriteString("\\href{") | ||||
| 	out.Write(link) | ||||
| 	out.WriteString("}{") | ||||
| 	out.Write(content) | ||||
| 	out.WriteString("}") | ||||
| } | ||||
|  | ||||
| func (options *Latex) RawHtmlTag(out *bytes.Buffer, tag []byte) { | ||||
| } | ||||
|  | ||||
| func (options *Latex) TripleEmphasis(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("\\textbf{\\textit{") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("}}") | ||||
| } | ||||
|  | ||||
| func (options *Latex) StrikeThrough(out *bytes.Buffer, text []byte) { | ||||
| 	out.WriteString("\\sout{") | ||||
| 	out.Write(text) | ||||
| 	out.WriteString("}") | ||||
| } | ||||
|  | ||||
| // TODO: this | ||||
| func (options *Latex) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { | ||||
|  | ||||
| } | ||||
|  | ||||
| func needsBackslash(c byte) bool { | ||||
| 	for _, r := range []byte("_{}%$&\\~#") { | ||||
| 		if c == r { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func escapeSpecialChars(out *bytes.Buffer, text []byte) { | ||||
| 	for i := 0; i < len(text); i++ { | ||||
| 		// directly copy normal characters | ||||
| 		org := i | ||||
|  | ||||
| 		for i < len(text) && !needsBackslash(text[i]) { | ||||
| 			i++ | ||||
| 		} | ||||
| 		if i > org { | ||||
| 			out.Write(text[org:i]) | ||||
| 		} | ||||
|  | ||||
| 		// escape a character | ||||
| 		if i >= len(text) { | ||||
| 			break | ||||
| 		} | ||||
| 		out.WriteByte('\\') | ||||
| 		out.WriteByte(text[i]) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (options *Latex) Entity(out *bytes.Buffer, entity []byte) { | ||||
| 	// TODO: convert this into a unicode character or something | ||||
| 	out.Write(entity) | ||||
| } | ||||
|  | ||||
| func (options *Latex) NormalText(out *bytes.Buffer, text []byte) { | ||||
| 	escapeSpecialChars(out, text) | ||||
| } | ||||
|  | ||||
| // header and footer | ||||
| func (options *Latex) DocumentHeader(out *bytes.Buffer) { | ||||
| 	out.WriteString("\\documentclass{article}\n") | ||||
| 	out.WriteString("\n") | ||||
| 	out.WriteString("\\usepackage{graphicx}\n") | ||||
| 	out.WriteString("\\usepackage{listings}\n") | ||||
| 	out.WriteString("\\usepackage[margin=1in]{geometry}\n") | ||||
| 	out.WriteString("\\usepackage[utf8]{inputenc}\n") | ||||
| 	out.WriteString("\\usepackage{verbatim}\n") | ||||
| 	out.WriteString("\\usepackage[normalem]{ulem}\n") | ||||
| 	out.WriteString("\\usepackage{hyperref}\n") | ||||
| 	out.WriteString("\n") | ||||
| 	out.WriteString("\\hypersetup{colorlinks,%\n") | ||||
| 	out.WriteString("  citecolor=black,%\n") | ||||
| 	out.WriteString("  filecolor=black,%\n") | ||||
| 	out.WriteString("  linkcolor=black,%\n") | ||||
| 	out.WriteString("  urlcolor=black,%\n") | ||||
| 	out.WriteString("  pdfstartview=FitH,%\n") | ||||
| 	out.WriteString("  breaklinks=true,%\n") | ||||
| 	out.WriteString("  pdfauthor={Blackfriday Markdown Processor v") | ||||
| 	out.WriteString(VERSION) | ||||
| 	out.WriteString("}}\n") | ||||
| 	out.WriteString("\n") | ||||
| 	out.WriteString("\\newcommand{\\HRule}{\\rule{\\linewidth}{0.5mm}}\n") | ||||
| 	out.WriteString("\\addtolength{\\parskip}{0.5\\baselineskip}\n") | ||||
| 	out.WriteString("\\parindent=0pt\n") | ||||
| 	out.WriteString("\n") | ||||
| 	out.WriteString("\\begin{document}\n") | ||||
| } | ||||
|  | ||||
| func (options *Latex) DocumentFooter(out *bytes.Buffer) { | ||||
| 	out.WriteString("\n\\end{document}\n") | ||||
| } | ||||
| @@ -1,30 +1,17 @@ | ||||
| sudo: false | ||||
| language: go | ||||
| go: | ||||
|   - 1.5.4 | ||||
|   - 1.6.2 | ||||
|   - "1.10.x" | ||||
|   - "1.11.x" | ||||
|   - tip | ||||
| matrix: | ||||
|   include: | ||||
|     - go: 1.2.2 | ||||
|       script: | ||||
|         - go get -t -v ./... | ||||
|         - go test -v -race ./... | ||||
|     - go: 1.3.3 | ||||
|       script: | ||||
|         - go get -t -v ./... | ||||
|         - go test -v -race ./... | ||||
|     - go: 1.4.3 | ||||
|       script: | ||||
|         - go get -t -v ./... | ||||
|         - go test -v -race ./... | ||||
|   fast_finish: true | ||||
|   allow_failures: | ||||
|     - go: tip | ||||
|   fast_finish: true | ||||
| install: | ||||
|   - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). | ||||
| script: | ||||
|   - go get -t -v ./... | ||||
|   - diff -u <(echo -n) <(gofmt -d -s .) | ||||
|   - go tool vet . | ||||
|   - go test -v -race ./... | ||||
|   - go test -v ./... | ||||
| @@ -1,6 +1,4 @@ | ||||
| Blackfriday | ||||
| [![Build Status][BuildSVG]][BuildURL] | ||||
| [![Godoc][GodocV2SVG]][GodocV2URL] | ||||
| Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday) | ||||
| =========== | ||||
| 
 | ||||
| Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It | ||||
| @@ -18,12 +16,18 @@ It started as a translation from C of [Sundown][3]. | ||||
| Installation | ||||
| ------------ | ||||
| 
 | ||||
| Blackfriday is compatible with any modern Go release. With Go and git installed: | ||||
| Blackfriday is compatible with any modern Go release. With Go 1.7 and git | ||||
| installed: | ||||
| 
 | ||||
|     go get -u gopkg.in/russross/blackfriday.v2 | ||||
|     go get gopkg.in/russross/blackfriday.v2 | ||||
| 
 | ||||
| will download, compile, and install the package into your `$GOPATH` directory | ||||
| hierarchy. | ||||
| will download, compile, and install the package into your `$GOPATH` | ||||
| directory hierarchy. Alternatively, you can achieve the same if you | ||||
| import it into a project: | ||||
| 
 | ||||
|     import "gopkg.in/russross/blackfriday.v2" | ||||
| 
 | ||||
| and `go get` without parameters. | ||||
| 
 | ||||
| 
 | ||||
| Versions | ||||
| @@ -34,7 +38,7 @@ developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and | ||||
| documentation is available at | ||||
| https://godoc.org/gopkg.in/russross/blackfriday.v2. | ||||
| 
 | ||||
| It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, | ||||
| It is `go get`-able via via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, | ||||
| but we highly recommend using package management tool like [dep][7] or | ||||
| [Glide][8] and make use of semantic versioning. With package management you | ||||
| should import `github.com/russross/blackfriday` and specify that you're using | ||||
| @@ -58,43 +62,9 @@ Potential drawbacks: | ||||
|   v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for | ||||
|   tracking. | ||||
| 
 | ||||
| If you are still interested in the legacy `v1`, you can import it from | ||||
| `github.com/russross/blackfriday`. Documentation for the legacy v1 can be found | ||||
| here: https://godoc.org/github.com/russross/blackfriday | ||||
| 
 | ||||
| ### Known issue with `dep` | ||||
| 
 | ||||
| There is a known problem with using Blackfriday v1 _transitively_ and `dep`. | ||||
| Currently `dep` prioritizes semver versions over anything else, and picks the | ||||
| latest one, plus it does not apply a `[[constraint]]` specifier to transitively | ||||
| pulled in packages. So if you're using something that uses Blackfriday v1, but | ||||
| that something does not use `dep` yet, you will get Blackfriday v2 pulled in and | ||||
| your first dependency will fail to build. | ||||
| 
 | ||||
| There are a couple of fixes for it, documented here: | ||||
| https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version | ||||
| 
 | ||||
| Meanwhile, the `dep` team is working on a more general solution to the constraints | ||||
| on transitive dependencies problem: https://github.com/golang/dep/issues/1124. | ||||
| 
 | ||||
| 
 | ||||
| Usage | ||||
| ----- | ||||
| 
 | ||||
| ### v1 | ||||
| 
 | ||||
| For basic usage, it is as simple as getting your input into a byte | ||||
| slice and calling: | ||||
| 
 | ||||
|     output := blackfriday.MarkdownBasic(input) | ||||
| 
 | ||||
| This renders it with no extensions enabled. To get a more useful | ||||
| feature set, use this instead: | ||||
| 
 | ||||
|     output := blackfriday.MarkdownCommon(input) | ||||
| 
 | ||||
| ### v2 | ||||
| 
 | ||||
| For the most sensible markdown processing, it is as simple as getting your input | ||||
| into a byte slice and calling: | ||||
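
A minimal sketch of that call, assuming the v2 `Run` entry point described in the
package documentation:

    output := blackfriday.Run(input)

By default `Run` enables the common extensions and renders through the built-in
HTML renderer.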
| 
 | ||||
| @@ -121,7 +91,7 @@ Here's an example of simple usage of Blackfriday together with Bluemonday: | ||||
| ```go | ||||
| import ( | ||||
|     "github.com/microcosm-cc/bluemonday" | ||||
|     "gopkg.in/russross/blackfriday.v2" | ||||
|     "github.com/russross/blackfriday" | ||||
| ) | ||||
| 
 | ||||
| // ... | ||||
| @@ -129,21 +99,11 @@ unsafe := blackfriday.Run(input) | ||||
| html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) | ||||
| ``` | ||||
| 
 | ||||
| ### Custom options, v1 | ||||
| 
 | ||||
| If you want to customize the set of options, first get a renderer | ||||
| (currently only the HTML output engine), then use it to | ||||
| call the more general `Markdown` function. For examples, see the | ||||
| implementations of `MarkdownBasic` and `MarkdownCommon` in | ||||
| `markdown.go`. | ||||
| 
 | ||||
| ### Custom options, v2 | ||||
| ### Custom options | ||||
| 
 | ||||
| If you want to customize the set of options, use `blackfriday.WithExtensions`, | ||||
| `blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. | ||||
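
For instance, a short sketch (assuming the option constructors named above together
with the v2 `NewHTMLRenderer`) that enables the common extensions plus hard line
breaks and swaps in a custom-flagged HTML renderer:

```go
renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
	// Common flags, plus target="_blank" on absolute links.
	Flags: blackfriday.CommonHTMLFlags | blackfriday.HrefTargetBlank,
})
output := blackfriday.Run(input,
	blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.HardLineBreak),
	blackfriday.WithRenderer(renderer))
```

`blackfriday.WithRefOverride` follows the same pattern, taking a callback that
supplies reference-link definitions at parse time.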
| 
 | ||||
| ### `blackfriday-tool` | ||||
| 
 | ||||
| You can also check out `blackfriday-tool` for a more complete example | ||||
| of how to use it. Download and install it using: | ||||
| 
 | ||||
| @@ -163,22 +123,6 @@ installed in `$GOPATH/bin`.  This is a statically-linked binary that | ||||
| can be copied to wherever you need it without worrying about | ||||
| dependencies and library versions. | ||||
| 
 | ||||
| ### Sanitized anchor names | ||||
| 
 | ||||
| Blackfriday includes an algorithm for creating sanitized anchor names | ||||
| corresponding to a given input text. This algorithm is used to create | ||||
| anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The | ||||
| algorithm has a specification, so that other packages can create | ||||
| compatible anchor names and links to those anchors. | ||||
| 
 | ||||
| The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. | ||||
| 
 | ||||
| [`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to | ||||
| create compatible links to the anchor names generated by blackfriday. | ||||
| This algorithm is also implemented in a small standalone package at | ||||
| [`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients | ||||
| that want a small package and don't need full functionality of blackfriday. | ||||
| 
 | ||||
| 
 | ||||
| Features | ||||
| -------- | ||||
| @@ -246,7 +190,7 @@ implements the following extensions: | ||||
|     and supply a language (to make syntax highlighting simple). Just | ||||
|     mark it like this: | ||||
| 
 | ||||
|         ``` go | ||||
|         ```go | ||||
|         func getTrue() bool { | ||||
|             return true | ||||
|         } | ||||
| @@ -255,15 +199,6 @@ implements the following extensions: | ||||
|     You can use 3 or more backticks to mark the beginning of the | ||||
|     block, and the same number to mark the end of the block. | ||||
| 
 | ||||
|     To preserve classes of fenced code blocks while using the bluemonday | ||||
|     HTML sanitizer, use the following policy: | ||||
| 
 | ||||
|     ``` go | ||||
|     p := bluemonday.UGCPolicy() | ||||
|     p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") | ||||
|     html := p.SanitizeBytes(unsafe) | ||||
|     ``` | ||||
| 
 | ||||
| *   **Definition lists**. A simple definition list is made of a single-line | ||||
|     term followed by a colon and the definition for that term. | ||||
| 
 | ||||
| @@ -289,10 +224,8 @@ implements the following extensions: | ||||
| *   **Strikethrough**. Use two tildes (`~~`) to mark text that | ||||
|     should be crossed out. | ||||
| 
 | ||||
| *   **Hard line breaks**. With this extension enabled (it is off by | ||||
|     default in the `MarkdownBasic` and `MarkdownCommon` convenience | ||||
|     functions), newlines in the input translate into line breaks in | ||||
|     the output. | ||||
| *   **Hard line breaks**. With this extension enabled newlines in the input | ||||
|     translate into line breaks in the output. This extension is off by default. | ||||
| 
 | ||||
| *   **Smart quotes**. Smartypants-style punctuation substitution is | ||||
|     supported, turning normal double- and single-quote marks into | ||||
| @@ -328,18 +261,20 @@ are a few of note: | ||||
| *   [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, | ||||
|     but for markdown. | ||||
| 
 | ||||
| *   [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): | ||||
| *   [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX): | ||||
|     renders output as LaTeX. | ||||
| 
 | ||||
| *   [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. | ||||
| 
 | ||||
| TODO | ||||
| 
 | ||||
| Todo | ||||
| ---- | ||||
| 
 | ||||
| *   More unit testing | ||||
| *   Improve Unicode support. It does not understand all Unicode | ||||
| *   Improve unicode support. It does not understand all unicode | ||||
|     rules (about what constitutes a letter, a punctuation symbol, | ||||
|     etc.), so it may fail to detect word boundaries correctly in | ||||
|     some instances. It is safe on all UTF-8 input. | ||||
|     some instances. It is safe on all utf-8 input. | ||||
| 
 | ||||
| 
 | ||||
| License | ||||
| @@ -354,10 +289,3 @@ License | ||||
|    [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" | ||||
|    [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" | ||||
|    [6]: https://labix.org/gopkg.in "gopkg.in" | ||||
|    [7]: https://github.com/golang/dep/ "dep" | ||||
|    [8]: https://github.com/Masterminds/glide "Glide" | ||||
| 
 | ||||
|    [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master | ||||
|    [BuildURL]: https://travis-ci.org/russross/blackfriday | ||||
|    [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg | ||||
|    [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 | ||||
										
											
File diff suppressed because it is too large

vendor/github.com/russross/blackfriday/v2/doc.go | 18 (generated, vendored, Normal file)
							| @@ -0,0 +1,18 @@ | ||||
| // Package blackfriday is a markdown processor. | ||||
| // | ||||
| // It translates plain text with simple formatting rules into an AST, which can | ||||
| // then be further processed to HTML (provided by Blackfriday itself) or other | ||||
| // formats (provided by the community). | ||||
| // | ||||
| // The simplest way to invoke Blackfriday is to call the Run function. It will | ||||
| // take a text input and produce a text output in HTML (or other format). | ||||
| // | ||||
| // A slightly more sophisticated way to use Blackfriday is to create a Markdown | ||||
| // processor and to call Parse, which returns a syntax tree for the input | ||||
| // document. You can leverage Blackfriday's parsing for content extraction from | ||||
| // markdown documents. You can assign a custom renderer and set various options | ||||
| // to the Markdown processor. | ||||
| // | ||||
| // If you're interested in calling Blackfriday from command line, see | ||||
| // https://github.com/russross/blackfriday-tool. | ||||
| package blackfriday | ||||
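
As an illustration of the two entry points the package comment describes (the
one-shot `Run` call, and building a `Markdown` processor to `Parse` and walk the
AST), here is a small self-contained sketch:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("# Title\n\nSome *markdown* text.\n")

	// Simplest path: Run converts markdown to HTML with sensible defaults.
	fmt.Println(string(blackfriday.Run(input)))

	// More control: build a processor, parse to an AST, then walk the nodes.
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	md.Parse(input).Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Heading {
			fmt.Printf("heading at level %d\n", node.Level)
		}
		return blackfriday.GoToNext
	})
}
```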
							
								
								
									
vendor/github.com/russross/blackfriday/v2/esc.go | 34 (generated, vendored, Normal file)
							| @@ -0,0 +1,34 @@ | ||||
| package blackfriday | ||||
|  | ||||
| import ( | ||||
| 	"html" | ||||
| 	"io" | ||||
| ) | ||||
|  | ||||
| var htmlEscaper = [256][]byte{ | ||||
| 	'&': []byte("&amp;"), | ||||
| 	'<': []byte("&lt;"), | ||||
| 	'>': []byte("&gt;"), | ||||
| 	'"': []byte("&quot;"), | ||||
| } | ||||
|  | ||||
| func escapeHTML(w io.Writer, s []byte) { | ||||
| 	var start, end int | ||||
| 	for end < len(s) { | ||||
| 		escSeq := htmlEscaper[s[end]] | ||||
| 		if escSeq != nil { | ||||
| 			w.Write(s[start:end]) | ||||
| 			w.Write(escSeq) | ||||
| 			start = end + 1 | ||||
| 		} | ||||
| 		end++ | ||||
| 	} | ||||
| 	if start < len(s) && end <= len(s) { | ||||
| 		w.Write(s[start:end]) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func escLink(w io.Writer, text []byte) { | ||||
| 	unesc := html.UnescapeString(string(text)) | ||||
| 	escapeHTML(w, []byte(unesc)) | ||||
| } | ||||
							
								
								
									
vendor/github.com/russross/blackfriday/v2/go.mod | 1 (generated, vendored, Normal file)
							| @@ -0,0 +1 @@ | ||||
| module github.com/russross/blackfriday/v2 | ||||
							
								
								
									
vendor/github.com/russross/blackfriday/v2/html.go | 949 (generated, vendored, Normal file)
							| @@ -0,0 +1,949 @@ | ||||
| // | ||||
| // Blackfriday Markdown Processor | ||||
| // Available at http://github.com/russross/blackfriday | ||||
| // | ||||
| // Copyright © 2011 Russ Ross <russ@russross.com>. | ||||
| // Distributed under the Simplified BSD License. | ||||
| // See README.md for details. | ||||
| // | ||||
|  | ||||
| // | ||||
| // | ||||
| // HTML rendering backend | ||||
| // | ||||
| // | ||||
|  | ||||
| package blackfriday | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // HTMLFlags control optional behavior of HTML renderer. | ||||
| type HTMLFlags int | ||||
|  | ||||
| // HTML renderer configuration options. | ||||
| const ( | ||||
| 	HTMLFlagsNone           HTMLFlags = 0 | ||||
| 	SkipHTML                HTMLFlags = 1 << iota // Skip preformatted HTML blocks | ||||
| 	SkipImages                                    // Skip embedded images | ||||
| 	SkipLinks                                     // Skip all links | ||||
| 	Safelink                                      // Only link to trusted protocols | ||||
| 	NofollowLinks                                 // Only link with rel="nofollow" | ||||
| 	NoreferrerLinks                               // Only link with rel="noreferrer" | ||||
| 	NoopenerLinks                                 // Only link with rel="noopener" | ||||
| 	HrefTargetBlank                               // Add a blank target | ||||
| 	CompletePage                                  // Generate a complete HTML page | ||||
| 	UseXHTML                                      // Generate XHTML output instead of HTML | ||||
| 	FootnoteReturnLinks                           // Generate a link at the end of a footnote to return to the source | ||||
| 	Smartypants                                   // Enable smart punctuation substitutions | ||||
| 	SmartypantsFractions                          // Enable smart fractions (with Smartypants) | ||||
| 	SmartypantsDashes                             // Enable smart dashes (with Smartypants) | ||||
| 	SmartypantsLatexDashes                        // Enable LaTeX-style dashes (with Smartypants) | ||||
| 	SmartypantsAngledQuotes                       // Enable angled double quotes (with Smartypants) for double quotes rendering | ||||
| 	SmartypantsQuotesNBSP                         // Enable « French guillemets » (with Smartypants) | ||||
| 	TOC                                           // Generate a table of contents | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag) | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" + | ||||
| 		processingInstruction + "|" + declaration + "|" + cdata + ")" | ||||
| 	closeTag              = "</" + tagName + "\\s*[>]" | ||||
| 	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>" | ||||
| 	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)" | ||||
| 	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")" | ||||
| 	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")" | ||||
| 	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*" | ||||
| 	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>" | ||||
| 	declaration           = "<![A-Z]+" + "\\s+[^>]*>" | ||||
| 	doubleQuotedValue     = "\"[^\"]*\"" | ||||
| 	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->" | ||||
| 	processingInstruction = "[<][?].*?[?][>]" | ||||
| 	singleQuotedValue     = "'[^']*'" | ||||
| 	tagName               = "[A-Za-z][A-Za-z0-9-]*" | ||||
| 	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+" | ||||
| ) | ||||
|  | ||||
| // HTMLRendererParameters is a collection of supplementary parameters tweaking | ||||
| // the behavior of various parts of HTML renderer. | ||||
| type HTMLRendererParameters struct { | ||||
| 	// Prepend this text to each relative URL. | ||||
| 	AbsolutePrefix string | ||||
| 	// Add this text to each footnote anchor, to ensure uniqueness. | ||||
| 	FootnoteAnchorPrefix string | ||||
| 	// Show this text inside the <a> tag for a footnote return link, if the | ||||
| 	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string | ||||
| 	// <sup>[return]</sup> is used. | ||||
| 	FootnoteReturnLinkContents string | ||||
| 	// If set, add this text to the front of each Heading ID, to ensure | ||||
| 	// uniqueness. | ||||
| 	HeadingIDPrefix string | ||||
| 	// If set, add this text to the back of each Heading ID, to ensure uniqueness. | ||||
| 	HeadingIDSuffix string | ||||
| 	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc. | ||||
| 	// Negative offset is also valid. | ||||
| 	// Resulting levels are clipped between 1 and 6. | ||||
| 	HeadingLevelOffset int | ||||
|  | ||||
| 	Title string // Document title (used if CompletePage is set) | ||||
| 	CSS   string // Optional CSS file URL (used if CompletePage is set) | ||||
| 	Icon  string // Optional icon file URL (used if CompletePage is set) | ||||
|  | ||||
| 	Flags HTMLFlags // Flags allow customizing this renderer's behavior | ||||
| } | ||||
|  | ||||
| // HTMLRenderer is a type that implements the Renderer interface for HTML output. | ||||
| // | ||||
| // Do not create this directly, instead use the NewHTMLRenderer function. | ||||
| type HTMLRenderer struct { | ||||
| 	HTMLRendererParameters | ||||
|  | ||||
| 	closeTag string // how to end singleton tags: either " />" or ">" | ||||
|  | ||||
| 	// Track heading IDs to prevent ID collision in a single generation. | ||||
| 	headingIDs map[string]int | ||||
|  | ||||
| 	lastOutputLen int | ||||
| 	disableTags   int | ||||
|  | ||||
| 	sr *SPRenderer | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	xhtmlClose = " />" | ||||
| 	htmlClose  = ">" | ||||
| ) | ||||
|  | ||||
| // NewHTMLRenderer creates and configures an HTMLRenderer object, which | ||||
| // satisfies the Renderer interface. | ||||
| func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { | ||||
| 	// configure the rendering engine | ||||
| 	closeTag := htmlClose | ||||
| 	if params.Flags&UseXHTML != 0 { | ||||
| 		closeTag = xhtmlClose | ||||
| 	} | ||||
|  | ||||
| 	if params.FootnoteReturnLinkContents == "" { | ||||
| 		params.FootnoteReturnLinkContents = `<sup>[return]</sup>` | ||||
| 	} | ||||
|  | ||||
| 	return &HTMLRenderer{ | ||||
| 		HTMLRendererParameters: params, | ||||
|  | ||||
| 		closeTag:   closeTag, | ||||
| 		headingIDs: make(map[string]int), | ||||
|  | ||||
| 		sr: NewSmartypantsRenderer(params.Flags), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func isHTMLTag(tag []byte, tagname string) bool { | ||||
| 	found, _ := findHTMLTagPos(tag, tagname) | ||||
| 	return found | ||||
| } | ||||
|  | ||||
| // Look for a character, but ignore it when it's in any kind of quotes, it | ||||
| // might be JavaScript | ||||
| func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { | ||||
| 	inSingleQuote := false | ||||
| 	inDoubleQuote := false | ||||
| 	inGraveQuote := false | ||||
| 	i := start | ||||
| 	for i < len(html) { | ||||
| 		switch { | ||||
| 		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: | ||||
| 			return i | ||||
| 		case html[i] == '\'': | ||||
| 			inSingleQuote = !inSingleQuote | ||||
| 		case html[i] == '"': | ||||
| 			inDoubleQuote = !inDoubleQuote | ||||
| 		case html[i] == '`': | ||||
| 			inGraveQuote = !inGraveQuote | ||||
| 		} | ||||
| 		i++ | ||||
| 	} | ||||
| 	return start | ||||
| } | ||||
|  | ||||
| func findHTMLTagPos(tag []byte, tagname string) (bool, int) { | ||||
| 	i := 0 | ||||
| 	if i < len(tag) && tag[0] != '<' { | ||||
| 		return false, -1 | ||||
| 	} | ||||
| 	i++ | ||||
| 	i = skipSpace(tag, i) | ||||
|  | ||||
| 	if i < len(tag) && tag[i] == '/' { | ||||
| 		i++ | ||||
| 	} | ||||
|  | ||||
| 	i = skipSpace(tag, i) | ||||
| 	j := 0 | ||||
| 	for ; i < len(tag); i, j = i+1, j+1 { | ||||
| 		if j >= len(tagname) { | ||||
| 			break | ||||
| 		} | ||||
|  | ||||
| 		if strings.ToLower(string(tag[i]))[0] != tagname[j] { | ||||
| 			return false, -1 | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if i == len(tag) { | ||||
| 		return false, -1 | ||||
| 	} | ||||
|  | ||||
| 	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') | ||||
| 	if rightAngle >= i { | ||||
| 		return true, rightAngle | ||||
| 	} | ||||
|  | ||||
| 	return false, -1 | ||||
| } | ||||
|  | ||||
| func skipSpace(tag []byte, i int) int { | ||||
| 	for i < len(tag) && isspace(tag[i]) { | ||||
| 		i++ | ||||
| 	} | ||||
| 	return i | ||||
| } | ||||
|  | ||||
| func isRelativeLink(link []byte) (yes bool) { | ||||
| 	// a tag begin with '#' | ||||
| 	if link[0] == '#' { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	// link begin with '/' but not '//', the second maybe a protocol relative link | ||||
| 	if len(link) >= 2 && link[0] == '/' && link[1] != '/' { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	// only the root '/' | ||||
| 	if len(link) == 1 && link[0] == '/' { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	// current directory : begin with "./" | ||||
| 	if bytes.HasPrefix(link, []byte("./")) { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	// parent directory : begin with "../" | ||||
| 	if bytes.HasPrefix(link, []byte("../")) { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string { | ||||
| 	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] { | ||||
| 		tmp := fmt.Sprintf("%s-%d", id, count+1) | ||||
|  | ||||
| 		if _, tmpFound := r.headingIDs[tmp]; !tmpFound { | ||||
| 			r.headingIDs[id] = count + 1 | ||||
| 			id = tmp | ||||
| 		} else { | ||||
| 			id = id + "-1" | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if _, found := r.headingIDs[id]; !found { | ||||
| 		r.headingIDs[id] = 0 | ||||
| 	} | ||||
|  | ||||
| 	return id | ||||
| } | ||||
|  | ||||
| func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte { | ||||
| 	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' { | ||||
| 		newDest := r.AbsolutePrefix | ||||
| 		if link[0] != '/' { | ||||
| 			newDest += "/" | ||||
| 		} | ||||
| 		newDest += string(link) | ||||
| 		return []byte(newDest) | ||||
| 	} | ||||
| 	return link | ||||
| } | ||||
|  | ||||
| func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string { | ||||
| 	if isRelativeLink(link) { | ||||
| 		return attrs | ||||
| 	} | ||||
| 	val := []string{} | ||||
| 	if flags&NofollowLinks != 0 { | ||||
| 		val = append(val, "nofollow") | ||||
| 	} | ||||
| 	if flags&NoreferrerLinks != 0 { | ||||
| 		val = append(val, "noreferrer") | ||||
| 	} | ||||
| 	if flags&NoopenerLinks != 0 { | ||||
| 		val = append(val, "noopener") | ||||
| 	} | ||||
| 	if flags&HrefTargetBlank != 0 { | ||||
| 		attrs = append(attrs, "target=\"_blank\"") | ||||
| 	} | ||||
| 	if len(val) == 0 { | ||||
| 		return attrs | ||||
| 	} | ||||
| 	attr := fmt.Sprintf("rel=%q", strings.Join(val, " ")) | ||||
| 	return append(attrs, attr) | ||||
| } | ||||
|  | ||||
| func isMailto(link []byte) bool { | ||||
| 	return bytes.HasPrefix(link, []byte("mailto:")) | ||||
| } | ||||
|  | ||||
| func needSkipLink(flags HTMLFlags, dest []byte) bool { | ||||
| 	if flags&SkipLinks != 0 { | ||||
| 		return true | ||||
| 	} | ||||
| 	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest) | ||||
| } | ||||
|  | ||||
| func isSmartypantable(node *Node) bool { | ||||
| 	pt := node.Parent.Type | ||||
| 	return pt != Link && pt != CodeBlock && pt != Code | ||||
| } | ||||
|  | ||||
| func appendLanguageAttr(attrs []string, info []byte) []string { | ||||
| 	if len(info) == 0 { | ||||
| 		return attrs | ||||
| 	} | ||||
| 	endOfLang := bytes.IndexAny(info, "\t ") | ||||
| 	if endOfLang < 0 { | ||||
| 		endOfLang = len(info) | ||||
| 	} | ||||
| 	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang])) | ||||
| } | ||||
|  | ||||
| func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) { | ||||
| 	w.Write(name) | ||||
| 	if len(attrs) > 0 { | ||||
| 		w.Write(spaceBytes) | ||||
| 		w.Write([]byte(strings.Join(attrs, " "))) | ||||
| 	} | ||||
| 	w.Write(gtBytes) | ||||
| 	r.lastOutputLen = 1 | ||||
| } | ||||
|  | ||||
| func footnoteRef(prefix string, node *Node) []byte { | ||||
| 	urlFrag := prefix + string(slugify(node.Destination)) | ||||
| 	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID) | ||||
| 	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor)) | ||||
| } | ||||
|  | ||||
| func footnoteItem(prefix string, slug []byte) []byte { | ||||
| 	return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug)) | ||||
| } | ||||
|  | ||||
| func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte { | ||||
| 	const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>` | ||||
| 	return []byte(fmt.Sprintf(format, prefix, slug, returnLink)) | ||||
| } | ||||
|  | ||||
| func itemOpenCR(node *Node) bool { | ||||
| 	if node.Prev == nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	ld := node.Parent.ListData | ||||
| 	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0 | ||||
| } | ||||
|  | ||||
| func skipParagraphTags(node *Node) bool { | ||||
| 	grandparent := node.Parent.Parent | ||||
| 	if grandparent == nil || grandparent.Type != List { | ||||
| 		return false | ||||
| 	} | ||||
| 	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0 | ||||
| 	return grandparent.Type == List && tightOrTerm | ||||
| } | ||||
|  | ||||
| func cellAlignment(align CellAlignFlags) string { | ||||
| 	switch align { | ||||
| 	case TableAlignmentLeft: | ||||
| 		return "left" | ||||
| 	case TableAlignmentRight: | ||||
| 		return "right" | ||||
| 	case TableAlignmentCenter: | ||||
| 		return "center" | ||||
| 	default: | ||||
| 		return "" | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (r *HTMLRenderer) out(w io.Writer, text []byte) { | ||||
| 	if r.disableTags > 0 { | ||||
| 		w.Write(htmlTagRe.ReplaceAll(text, []byte{})) | ||||
| 	} else { | ||||
| 		w.Write(text) | ||||
| 	} | ||||
| 	r.lastOutputLen = len(text) | ||||
| } | ||||
|  | ||||
| func (r *HTMLRenderer) cr(w io.Writer) { | ||||
| 	if r.lastOutputLen > 0 { | ||||
| 		r.out(w, nlBytes) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	nlBytes    = []byte{'\n'} | ||||
| 	gtBytes    = []byte{'>'} | ||||
| 	spaceBytes = []byte{' '} | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	brTag              = []byte("<br>") | ||||
| 	brXHTMLTag         = []byte("<br />") | ||||
| 	emTag              = []byte("<em>") | ||||
| 	emCloseTag         = []byte("</em>") | ||||
| 	strongTag          = []byte("<strong>") | ||||
| 	strongCloseTag     = []byte("</strong>") | ||||
| 	delTag             = []byte("<del>") | ||||
| 	delCloseTag        = []byte("</del>") | ||||
| 	ttTag              = []byte("<tt>") | ||||
| 	ttCloseTag         = []byte("</tt>") | ||||
| 	aTag               = []byte("<a") | ||||
| 	aCloseTag          = []byte("</a>") | ||||
| 	preTag             = []byte("<pre>") | ||||
| 	preCloseTag        = []byte("</pre>") | ||||
| 	codeTag            = []byte("<code>") | ||||
| 	codeCloseTag       = []byte("</code>") | ||||
| 	pTag               = []byte("<p>") | ||||
| 	pCloseTag          = []byte("</p>") | ||||
| 	blockquoteTag      = []byte("<blockquote>") | ||||
| 	blockquoteCloseTag = []byte("</blockquote>") | ||||
| 	hrTag              = []byte("<hr>") | ||||
| 	hrXHTMLTag         = []byte("<hr />") | ||||
| 	ulTag              = []byte("<ul>") | ||||
| 	ulCloseTag         = []byte("</ul>") | ||||
| 	olTag              = []byte("<ol>") | ||||
| 	olCloseTag         = []byte("</ol>") | ||||
| 	dlTag              = []byte("<dl>") | ||||
| 	dlCloseTag         = []byte("</dl>") | ||||
| 	liTag              = []byte("<li>") | ||||
| 	liCloseTag         = []byte("</li>") | ||||
| 	ddTag              = []byte("<dd>") | ||||
| 	ddCloseTag         = []byte("</dd>") | ||||
| 	dtTag              = []byte("<dt>") | ||||
| 	dtCloseTag         = []byte("</dt>") | ||||
| 	tableTag           = []byte("<table>") | ||||
| 	tableCloseTag      = []byte("</table>") | ||||
| 	tdTag              = []byte("<td") | ||||
| 	tdCloseTag         = []byte("</td>") | ||||
| 	thTag              = []byte("<th") | ||||
| 	thCloseTag         = []byte("</th>") | ||||
| 	theadTag           = []byte("<thead>") | ||||
| 	theadCloseTag      = []byte("</thead>") | ||||
| 	tbodyTag           = []byte("<tbody>") | ||||
| 	tbodyCloseTag      = []byte("</tbody>") | ||||
| 	trTag              = []byte("<tr>") | ||||
| 	trCloseTag         = []byte("</tr>") | ||||
| 	h1Tag              = []byte("<h1") | ||||
| 	h1CloseTag         = []byte("</h1>") | ||||
| 	h2Tag              = []byte("<h2") | ||||
| 	h2CloseTag         = []byte("</h2>") | ||||
| 	h3Tag              = []byte("<h3") | ||||
| 	h3CloseTag         = []byte("</h3>") | ||||
| 	h4Tag              = []byte("<h4") | ||||
| 	h4CloseTag         = []byte("</h4>") | ||||
| 	h5Tag              = []byte("<h5") | ||||
| 	h5CloseTag         = []byte("</h5>") | ||||
| 	h6Tag              = []byte("<h6") | ||||
| 	h6CloseTag         = []byte("</h6>") | ||||
|  | ||||
| 	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n") | ||||
| 	footnotesCloseDivBytes = []byte("\n</div>\n") | ||||
| ) | ||||
|  | ||||
| func headingTagsFromLevel(level int) ([]byte, []byte) { | ||||
| 	if level <= 1 { | ||||
| 		return h1Tag, h1CloseTag | ||||
| 	} | ||||
| 	switch level { | ||||
| 	case 2: | ||||
| 		return h2Tag, h2CloseTag | ||||
| 	case 3: | ||||
| 		return h3Tag, h3CloseTag | ||||
| 	case 4: | ||||
| 		return h4Tag, h4CloseTag | ||||
| 	case 5: | ||||
| 		return h5Tag, h5CloseTag | ||||
| 	} | ||||
| 	return h6Tag, h6CloseTag | ||||
| } | ||||
|  | ||||
| func (r *HTMLRenderer) outHRTag(w io.Writer) { | ||||
| 	if r.Flags&UseXHTML == 0 { | ||||
| 		r.out(w, hrTag) | ||||
| 	} else { | ||||
| 		r.out(w, hrXHTMLTag) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // RenderNode is a default renderer of a single node of a syntax tree. For | ||||
| // block nodes it will be called twice: first time with entering=true, second | ||||
| // time with entering=false, so that it could know when it's working on an open | ||||
| // tag and when on close. It writes the result to w. | ||||
| // | ||||
| // The return value is a way to tell the calling walker to adjust its walk | ||||
| // pattern: e.g. it can terminate the traversal by returning Terminate. Or it | ||||
| // can ask the walker to skip a subtree of this node by returning SkipChildren. | ||||
| // The typical behavior is to return GoToNext, which asks for the usual | ||||
| // traversal to the next node. | ||||
| func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { | ||||
| 	attrs := []string{} | ||||
| 	switch node.Type { | ||||
| 	case Text: | ||||
| 		if r.Flags&Smartypants != 0 { | ||||
| 			var tmp bytes.Buffer | ||||
| 			escapeHTML(&tmp, node.Literal) | ||||
| 			r.sr.Process(w, tmp.Bytes()) | ||||
| 		} else { | ||||
| 			if node.Parent.Type == Link { | ||||
| 				escLink(w, node.Literal) | ||||
| 			} else { | ||||
| 				escapeHTML(w, node.Literal) | ||||
| 			} | ||||
| 		} | ||||
| 	case Softbreak: | ||||
| 		r.cr(w) | ||||
| 		// TODO: make it configurable via out(renderer.softbreak) | ||||
| 	case Hardbreak: | ||||
| 		if r.Flags&UseXHTML == 0 { | ||||
| 			r.out(w, brTag) | ||||
| 		} else { | ||||
| 			r.out(w, brXHTMLTag) | ||||
| 		} | ||||
| 		r.cr(w) | ||||
| 	case Emph: | ||||
| 		if entering { | ||||
| 			r.out(w, emTag) | ||||
| 		} else { | ||||
| 			r.out(w, emCloseTag) | ||||
| 		} | ||||
| 	case Strong: | ||||
| 		if entering { | ||||
| 			r.out(w, strongTag) | ||||
| 		} else { | ||||
| 			r.out(w, strongCloseTag) | ||||
| 		} | ||||
| 	case Del: | ||||
| 		if entering { | ||||
| 			r.out(w, delTag) | ||||
| 		} else { | ||||
| 			r.out(w, delCloseTag) | ||||
| 		} | ||||
| 	case HTMLSpan: | ||||
| 		if r.Flags&SkipHTML != 0 { | ||||
| 			break | ||||
| 		} | ||||
| 		r.out(w, node.Literal) | ||||
| 	case Link: | ||||
| 		// mark it but don't link it if it is not a safe link: no smartypants | ||||
| 		dest := node.LinkData.Destination | ||||
| 		if needSkipLink(r.Flags, dest) { | ||||
| 			if entering { | ||||
| 				r.out(w, ttTag) | ||||
| 			} else { | ||||
| 				r.out(w, ttCloseTag) | ||||
| 			} | ||||
| 		} else { | ||||
| 			if entering { | ||||
| 				dest = r.addAbsPrefix(dest) | ||||
| 				var hrefBuf bytes.Buffer | ||||
| 				hrefBuf.WriteString("href=\"") | ||||
| 				escLink(&hrefBuf, dest) | ||||
| 				hrefBuf.WriteByte('"') | ||||
| 				attrs = append(attrs, hrefBuf.String()) | ||||
| 				if node.NoteID != 0 { | ||||
| 					r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) | ||||
| 					break | ||||
| 				} | ||||
| 				attrs = appendLinkAttrs(attrs, r.Flags, dest) | ||||
| 				if len(node.LinkData.Title) > 0 { | ||||
| 					var titleBuff bytes.Buffer | ||||
| 					titleBuff.WriteString("title=\"") | ||||
| 					escapeHTML(&titleBuff, node.LinkData.Title) | ||||
| 					titleBuff.WriteByte('"') | ||||
| 					attrs = append(attrs, titleBuff.String()) | ||||
| 				} | ||||
| 				r.tag(w, aTag, attrs) | ||||
| 			} else { | ||||
| 				if node.NoteID != 0 { | ||||
| 					break | ||||
| 				} | ||||
| 				r.out(w, aCloseTag) | ||||
| 			} | ||||
| 		} | ||||
| 	case Image: | ||||
| 		if r.Flags&SkipImages != 0 { | ||||
| 			return SkipChildren | ||||
| 		} | ||||
| 		if entering { | ||||
| 			dest := node.LinkData.Destination | ||||
| 			dest = r.addAbsPrefix(dest) | ||||
| 			if r.disableTags == 0 { | ||||
| 				//if options.safe && potentiallyUnsafe(dest) { | ||||
| 				//out(w, `<img src="" alt="`) | ||||
| 				//} else { | ||||
| 				r.out(w, []byte(`<img src="`)) | ||||
| 				escLink(w, dest) | ||||
| 				r.out(w, []byte(`" alt="`)) | ||||
| 				//} | ||||
| 			} | ||||
| 			r.disableTags++ | ||||
| 		} else { | ||||
| 			r.disableTags-- | ||||
| 			if r.disableTags == 0 { | ||||
| 				if node.LinkData.Title != nil { | ||||
| 					r.out(w, []byte(`" title="`)) | ||||
| 					escapeHTML(w, node.LinkData.Title) | ||||
| 				} | ||||
| 				r.out(w, []byte(`" />`)) | ||||
| 			} | ||||
| 		} | ||||
| 	case Code: | ||||
| 		r.out(w, codeTag) | ||||
| 		escapeHTML(w, node.Literal) | ||||
| 		r.out(w, codeCloseTag) | ||||
| 	case Document: | ||||
| 		break | ||||
| 	case Paragraph: | ||||
| 		if skipParagraphTags(node) { | ||||
| 			break | ||||
| 		} | ||||
| 		if entering { | ||||
| 			// TODO: untangle this clusterfuck about when the newlines need | ||||
| 			// to be added and when not. | ||||
| 			if node.Prev != nil { | ||||
| 				switch node.Prev.Type { | ||||
| 				case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: | ||||
| 					r.cr(w) | ||||
| 				} | ||||
| 			} | ||||
| 			if node.Parent.Type == BlockQuote && node.Prev == nil { | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 			r.out(w, pTag) | ||||
| 		} else { | ||||
| 			r.out(w, pCloseTag) | ||||
| 			if !(node.Parent.Type == Item && node.Next == nil) { | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 		} | ||||
| 	case BlockQuote: | ||||
| 		if entering { | ||||
| 			r.cr(w) | ||||
| 			r.out(w, blockquoteTag) | ||||
| 		} else { | ||||
| 			r.out(w, blockquoteCloseTag) | ||||
| 			r.cr(w) | ||||
| 		} | ||||
| 	case HTMLBlock: | ||||
| 		if r.Flags&SkipHTML != 0 { | ||||
| 			break | ||||
| 		} | ||||
| 		r.cr(w) | ||||
| 		r.out(w, node.Literal) | ||||
| 		r.cr(w) | ||||
| 	case Heading: | ||||
| 		headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level | ||||
| 		openTag, closeTag := headingTagsFromLevel(headingLevel) | ||||
| 		if entering { | ||||
| 			if node.IsTitleblock { | ||||
| 				attrs = append(attrs, `class="title"`) | ||||
| 			} | ||||
| 			if node.HeadingID != "" { | ||||
| 				id := r.ensureUniqueHeadingID(node.HeadingID) | ||||
| 				if r.HeadingIDPrefix != "" { | ||||
| 					id = r.HeadingIDPrefix + id | ||||
| 				} | ||||
| 				if r.HeadingIDSuffix != "" { | ||||
| 					id = id + r.HeadingIDSuffix | ||||
| 				} | ||||
| 				attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) | ||||
| 			} | ||||
| 			r.cr(w) | ||||
| 			r.tag(w, openTag, attrs) | ||||
| 		} else { | ||||
| 			r.out(w, closeTag) | ||||
| 			if !(node.Parent.Type == Item && node.Next == nil) { | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 		} | ||||
| 	case HorizontalRule: | ||||
| 		r.cr(w) | ||||
| 		r.outHRTag(w) | ||||
| 		r.cr(w) | ||||
| 	case List: | ||||
| 		openTag := ulTag | ||||
| 		closeTag := ulCloseTag | ||||
| 		if node.ListFlags&ListTypeOrdered != 0 { | ||||
| 			openTag = olTag | ||||
| 			closeTag = olCloseTag | ||||
| 		} | ||||
| 		if node.ListFlags&ListTypeDefinition != 0 { | ||||
| 			openTag = dlTag | ||||
| 			closeTag = dlCloseTag | ||||
| 		} | ||||
| 		if entering { | ||||
| 			if node.IsFootnotesList { | ||||
| 				r.out(w, footnotesDivBytes) | ||||
| 				r.outHRTag(w) | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 			r.cr(w) | ||||
| 			if node.Parent.Type == Item && node.Parent.Parent.Tight { | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 			r.tag(w, openTag[:len(openTag)-1], attrs) | ||||
| 			r.cr(w) | ||||
| 		} else { | ||||
| 			r.out(w, closeTag) | ||||
| 			//cr(w) | ||||
| 			//if node.parent.Type != Item { | ||||
| 			//	cr(w) | ||||
| 			//} | ||||
| 			if node.Parent.Type == Item && node.Next != nil { | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 			if node.Parent.Type == Document || node.Parent.Type == BlockQuote { | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 			if node.IsFootnotesList { | ||||
| 				r.out(w, footnotesCloseDivBytes) | ||||
| 			} | ||||
| 		} | ||||
| 	case Item: | ||||
| 		openTag := liTag | ||||
| 		closeTag := liCloseTag | ||||
| 		if node.ListFlags&ListTypeDefinition != 0 { | ||||
| 			openTag = ddTag | ||||
| 			closeTag = ddCloseTag | ||||
| 		} | ||||
| 		if node.ListFlags&ListTypeTerm != 0 { | ||||
| 			openTag = dtTag | ||||
| 			closeTag = dtCloseTag | ||||
| 		} | ||||
| 		if entering { | ||||
| 			if itemOpenCR(node) { | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 			if node.ListData.RefLink != nil { | ||||
| 				slug := slugify(node.ListData.RefLink) | ||||
| 				r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) | ||||
| 				break | ||||
| 			} | ||||
| 			r.out(w, openTag) | ||||
| 		} else { | ||||
| 			if node.ListData.RefLink != nil { | ||||
| 				slug := slugify(node.ListData.RefLink) | ||||
| 				if r.Flags&FootnoteReturnLinks != 0 { | ||||
| 					r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) | ||||
| 				} | ||||
| 			} | ||||
| 			r.out(w, closeTag) | ||||
| 			r.cr(w) | ||||
| 		} | ||||
| 	case CodeBlock: | ||||
| 		attrs = appendLanguageAttr(attrs, node.Info) | ||||
| 		r.cr(w) | ||||
| 		r.out(w, preTag) | ||||
| 		r.tag(w, codeTag[:len(codeTag)-1], attrs) | ||||
| 		escapeHTML(w, node.Literal) | ||||
| 		r.out(w, codeCloseTag) | ||||
| 		r.out(w, preCloseTag) | ||||
| 		if node.Parent.Type != Item { | ||||
| 			r.cr(w) | ||||
| 		} | ||||
| 	case Table: | ||||
| 		if entering { | ||||
| 			r.cr(w) | ||||
| 			r.out(w, tableTag) | ||||
| 		} else { | ||||
| 			r.out(w, tableCloseTag) | ||||
| 			r.cr(w) | ||||
| 		} | ||||
| 	case TableCell: | ||||
| 		openTag := tdTag | ||||
| 		closeTag := tdCloseTag | ||||
| 		if node.IsHeader { | ||||
| 			openTag = thTag | ||||
| 			closeTag = thCloseTag | ||||
| 		} | ||||
| 		if entering { | ||||
| 			align := cellAlignment(node.Align) | ||||
| 			if align != "" { | ||||
| 				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) | ||||
| 			} | ||||
| 			if node.Prev == nil { | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 			r.tag(w, openTag, attrs) | ||||
| 		} else { | ||||
| 			r.out(w, closeTag) | ||||
| 			r.cr(w) | ||||
| 		} | ||||
| 	case TableHead: | ||||
| 		if entering { | ||||
| 			r.cr(w) | ||||
| 			r.out(w, theadTag) | ||||
| 		} else { | ||||
| 			r.out(w, theadCloseTag) | ||||
| 			r.cr(w) | ||||
| 		} | ||||
| 	case TableBody: | ||||
| 		if entering { | ||||
| 			r.cr(w) | ||||
| 			r.out(w, tbodyTag) | ||||
| 			// XXX: this is to adhere to a rather silly test. Should fix test. | ||||
| 			if node.FirstChild == nil { | ||||
| 				r.cr(w) | ||||
| 			} | ||||
| 		} else { | ||||
| 			r.out(w, tbodyCloseTag) | ||||
| 			r.cr(w) | ||||
| 		} | ||||
| 	case TableRow: | ||||
| 		if entering { | ||||
| 			r.cr(w) | ||||
| 			r.out(w, trTag) | ||||
| 		} else { | ||||
| 			r.out(w, trCloseTag) | ||||
| 			r.cr(w) | ||||
| 		} | ||||
| 	default: | ||||
| 		panic("Unknown node type " + node.Type.String()) | ||||
| 	} | ||||
| 	return GoToNext | ||||
| } | ||||
|  | ||||
| // RenderHeader writes HTML document preamble and TOC if requested. | ||||
| func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { | ||||
| 	r.writeDocumentHeader(w) | ||||
| 	if r.Flags&TOC != 0 { | ||||
| 		r.writeTOC(w, ast) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // RenderFooter writes HTML document footer. | ||||
| func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { | ||||
| 	if r.Flags&CompletePage == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	io.WriteString(w, "\n</body>\n</html>\n") | ||||
| } | ||||
|  | ||||
| func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { | ||||
| 	if r.Flags&CompletePage == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	ending := "" | ||||
| 	if r.Flags&UseXHTML != 0 { | ||||
| 		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ") | ||||
| 		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n") | ||||
| 		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n") | ||||
| 		ending = " /" | ||||
| 	} else { | ||||
| 		io.WriteString(w, "<!DOCTYPE html>\n") | ||||
| 		io.WriteString(w, "<html>\n") | ||||
| 	} | ||||
| 	io.WriteString(w, "<head>\n") | ||||
| 	io.WriteString(w, "  <title>") | ||||
| 	if r.Flags&Smartypants != 0 { | ||||
| 		r.sr.Process(w, []byte(r.Title)) | ||||
| 	} else { | ||||
| 		escapeHTML(w, []byte(r.Title)) | ||||
| 	} | ||||
| 	io.WriteString(w, "</title>\n") | ||||
| 	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v") | ||||
| 	io.WriteString(w, Version) | ||||
| 	io.WriteString(w, "\"") | ||||
| 	io.WriteString(w, ending) | ||||
| 	io.WriteString(w, ">\n") | ||||
| 	io.WriteString(w, "  <meta charset=\"utf-8\"") | ||||
| 	io.WriteString(w, ending) | ||||
| 	io.WriteString(w, ">\n") | ||||
| 	if r.CSS != "" { | ||||
| 		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"") | ||||
| 		escapeHTML(w, []byte(r.CSS)) | ||||
| 		io.WriteString(w, "\"") | ||||
| 		io.WriteString(w, ending) | ||||
| 		io.WriteString(w, ">\n") | ||||
| 	} | ||||
| 	if r.Icon != "" { | ||||
| 		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"") | ||||
| 		escapeHTML(w, []byte(r.Icon)) | ||||
| 		io.WriteString(w, "\"") | ||||
| 		io.WriteString(w, ending) | ||||
| 		io.WriteString(w, ">\n") | ||||
| 	} | ||||
| 	io.WriteString(w, "</head>\n") | ||||
| 	io.WriteString(w, "<body>\n\n") | ||||
| } | ||||
|  | ||||
| func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { | ||||
| 	buf := bytes.Buffer{} | ||||
|  | ||||
| 	inHeading := false | ||||
| 	tocLevel := 0 | ||||
| 	headingCount := 0 | ||||
|  | ||||
| 	ast.Walk(func(node *Node, entering bool) WalkStatus { | ||||
| 		if node.Type == Heading && !node.HeadingData.IsTitleblock { | ||||
| 			inHeading = entering | ||||
| 			if entering { | ||||
| 				node.HeadingID = fmt.Sprintf("toc_%d", headingCount) | ||||
| 				if node.Level == tocLevel { | ||||
| 					buf.WriteString("</li>\n\n<li>") | ||||
| 				} else if node.Level < tocLevel { | ||||
| 					for node.Level < tocLevel { | ||||
| 						tocLevel-- | ||||
| 						buf.WriteString("</li>\n</ul>") | ||||
| 					} | ||||
| 					buf.WriteString("</li>\n\n<li>") | ||||
| 				} else { | ||||
| 					for node.Level > tocLevel { | ||||
| 						tocLevel++ | ||||
| 						buf.WriteString("\n<ul>\n<li>") | ||||
| 					} | ||||
| 				} | ||||
|  | ||||
| 				fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount) | ||||
| 				headingCount++ | ||||
| 			} else { | ||||
| 				buf.WriteString("</a>") | ||||
| 			} | ||||
| 			return GoToNext | ||||
| 		} | ||||
|  | ||||
| 		if inHeading { | ||||
| 			return r.RenderNode(&buf, node, entering) | ||||
| 		} | ||||
|  | ||||
| 		return GoToNext | ||||
| 	}) | ||||
|  | ||||
| 	for ; tocLevel > 0; tocLevel-- { | ||||
| 		buf.WriteString("</li>\n</ul>") | ||||
| 	} | ||||
|  | ||||
| 	if buf.Len() > 0 { | ||||
| 		io.WriteString(w, "<nav>\n") | ||||
| 		w.Write(buf.Bytes()) | ||||
| 		io.WriteString(w, "\n\n</nav>\n") | ||||
| 	} | ||||
| 	r.lastOutputLen = buf.Len() | ||||
| } | ||||
										
											
File diff suppressed because it is too large
							| @@ -1,103 +1,93 @@ | ||||
| // | ||||
| // Blackfriday Markdown Processor | ||||
| // Available at http://github.com/russross/blackfriday | ||||
| // | ||||
| // Copyright © 2011 Russ Ross <russ@russross.com>. | ||||
| // Distributed under the Simplified BSD License. | ||||
| // See README.md for details. | ||||
| // | ||||
| 
 | ||||
| // | ||||
| // | ||||
| // Markdown parsing and processing | ||||
| // | ||||
| // | ||||
| 
 | ||||
| package blackfriday | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
| 
 | ||||
| const VERSION = "1.5" | ||||
| // | ||||
| // Markdown parsing and processing | ||||
| // | ||||
| 
 | ||||
| // Version string of the package. Appears in the rendered document when | ||||
| // CompletePage flag is on. | ||||
| const Version = "2.0" | ||||
| 
 | ||||
| // Extensions is a bitwise or'ed collection of enabled Blackfriday's | ||||
| // extensions. | ||||
| type Extensions int | ||||
| 
 | ||||
| // These are the supported markdown parsing extensions. | ||||
| // OR these values together to select multiple extensions. | ||||
| const ( | ||||
| 	EXTENSION_NO_INTRA_EMPHASIS          = 1 << iota // ignore emphasis markers inside words | ||||
| 	EXTENSION_TABLES                                 // render tables | ||||
| 	EXTENSION_FENCED_CODE                            // render fenced code blocks | ||||
| 	EXTENSION_AUTOLINK                               // detect embedded URLs that are not explicitly marked | ||||
| 	EXTENSION_STRIKETHROUGH                          // strikethrough text using ~~test~~ | ||||
| 	EXTENSION_LAX_HTML_BLOCKS                        // loosen up HTML block parsing rules | ||||
| 	EXTENSION_SPACE_HEADERS                          // be strict about prefix header rules | ||||
| 	EXTENSION_HARD_LINE_BREAK                        // translate newlines into line breaks | ||||
| 	EXTENSION_TAB_SIZE_EIGHT                         // expand tabs to eight spaces instead of four | ||||
| 	EXTENSION_FOOTNOTES                              // Pandoc-style footnotes | ||||
| 	EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK             // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block | ||||
| 	EXTENSION_HEADER_IDS                             // specify header IDs  with {#id} | ||||
| 	EXTENSION_TITLEBLOCK                             // Titleblock ala pandoc | ||||
| 	EXTENSION_AUTO_HEADER_IDS                        // Create the header ID from the text | ||||
| 	EXTENSION_BACKSLASH_LINE_BREAK                   // translate trailing backslashes into line breaks | ||||
| 	EXTENSION_DEFINITION_LISTS                       // render definition lists | ||||
| 	EXTENSION_JOIN_LINES                             // delete newline and join lines | ||||
| 	NoExtensions           Extensions = 0 | ||||
| 	NoIntraEmphasis        Extensions = 1 << iota // Ignore emphasis markers inside words | ||||
| 	Tables                                        // Render tables | ||||
| 	FencedCode                                    // Render fenced code blocks | ||||
| 	Autolink                                      // Detect embedded URLs that are not explicitly marked | ||||
| 	Strikethrough                                 // Strikethrough text using ~~test~~ | ||||
| 	LaxHTMLBlocks                                 // Loosen up HTML block parsing rules | ||||
| 	SpaceHeadings                                 // Be strict about prefix heading rules | ||||
| 	HardLineBreak                                 // Translate newlines into line breaks | ||||
| 	TabSizeEight                                  // Expand tabs to eight spaces instead of four | ||||
| 	Footnotes                                     // Pandoc-style footnotes | ||||
| 	NoEmptyLineBeforeBlock                        // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block | ||||
| 	HeadingIDs                                    // specify heading IDs  with {#id} | ||||
| 	Titleblock                                    // Titleblock ala pandoc | ||||
| 	AutoHeadingIDs                                // Create the heading ID from the text | ||||
| 	BackslashLineBreak                            // Translate trailing backslashes into line breaks | ||||
| 	DefinitionLists                               // Render definition lists | ||||
| 
 | ||||
| 	commonHtmlFlags = 0 | | ||||
| 		HTML_USE_XHTML | | ||||
| 		HTML_USE_SMARTYPANTS | | ||||
| 		HTML_SMARTYPANTS_FRACTIONS | | ||||
| 		HTML_SMARTYPANTS_DASHES | | ||||
| 		HTML_SMARTYPANTS_LATEX_DASHES | ||||
| 	CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | | ||||
| 		SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes | ||||
| 
 | ||||
| 	commonExtensions = 0 | | ||||
| 		EXTENSION_NO_INTRA_EMPHASIS | | ||||
| 		EXTENSION_TABLES | | ||||
| 		EXTENSION_FENCED_CODE | | ||||
| 		EXTENSION_AUTOLINK | | ||||
| 		EXTENSION_STRIKETHROUGH | | ||||
| 		EXTENSION_SPACE_HEADERS | | ||||
| 		EXTENSION_HEADER_IDS | | ||||
| 		EXTENSION_BACKSLASH_LINE_BREAK | | ||||
| 		EXTENSION_DEFINITION_LISTS | ||||
| 	CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | | ||||
| 		Autolink | Strikethrough | SpaceHeadings | HeadingIDs | | ||||
| 		BackslashLineBreak | DefinitionLists | ||||
| ) | ||||
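As a quick illustration of how these bit flags compose, a minimal sketch (assuming the package is imported from github.com/russross/blackfriday/v2; the input text is invented):

package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Start from the common set and switch on one more extension.
	exts := blackfriday.CommonExtensions | blackfriday.Footnotes

	// Test a single flag the same way the parser does internally.
	if exts&blackfriday.Footnotes != 0 {
		fmt.Println("footnotes enabled")
	}

	// Hand the combined set to the processor.
	out := blackfriday.Run([]byte("A note.[^1]\n\n[^1]: Invented footnote text.\n"),
		blackfriday.WithExtensions(exts))
	fmt.Printf("%s", out)
}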
| 
 | ||||
| // These are the possible flag values for the link renderer. | ||||
| // Only a single one of these values will be used; they are not ORed together. | ||||
| // These are mostly of interest if you are writing a new output format. | ||||
| const ( | ||||
| 	LINK_TYPE_NOT_AUTOLINK = iota | ||||
| 	LINK_TYPE_NORMAL | ||||
| 	LINK_TYPE_EMAIL | ||||
| ) | ||||
| // ListType contains bitwise or'ed flags for list and list item objects. | ||||
| type ListType int | ||||
| 
 | ||||
| // These are the possible flag values for the ListItem renderer. | ||||
| // Multiple flag values may be ORed together. | ||||
| // These are mostly of interest if you are writing a new output format. | ||||
| const ( | ||||
| 	LIST_TYPE_ORDERED = 1 << iota | ||||
| 	LIST_TYPE_DEFINITION | ||||
| 	LIST_TYPE_TERM | ||||
| 	LIST_ITEM_CONTAINS_BLOCK | ||||
| 	LIST_ITEM_BEGINNING_OF_LIST | ||||
| 	LIST_ITEM_END_OF_LIST | ||||
| 	ListTypeOrdered ListType = 1 << iota | ||||
| 	ListTypeDefinition | ||||
| 	ListTypeTerm | ||||
| 
 | ||||
| 	ListItemContainsBlock | ||||
| 	ListItemBeginningOfList // TODO: figure out if this is of any use now | ||||
| 	ListItemEndOfList | ||||
| ) | ||||
| 
 | ||||
| // CellAlignFlags holds a type of alignment in a table cell. | ||||
| type CellAlignFlags int | ||||
| 
 | ||||
| // These are the possible flag values for the table cell renderer. | ||||
| // Only a single one of these values will be used; they are not ORed together. | ||||
| // These are mostly of interest if you are writing a new output format. | ||||
| const ( | ||||
| 	TABLE_ALIGNMENT_LEFT = 1 << iota | ||||
| 	TABLE_ALIGNMENT_RIGHT | ||||
| 	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT) | ||||
| 	TableAlignmentLeft CellAlignFlags = 1 << iota | ||||
| 	TableAlignmentRight | ||||
| 	TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) | ||||
| ) | ||||
| 
 | ||||
| // The size of a tab stop. | ||||
| const ( | ||||
| 	TAB_SIZE_DEFAULT = 4 | ||||
| 	TAB_SIZE_EIGHT   = 8 | ||||
| 	TabSizeDefault = 4 | ||||
| 	TabSizeDouble  = 8 | ||||
| ) | ||||
| 
 | ||||
| // blockTags is a set of tags that are recognized as HTML block tags. | ||||
| @@ -145,86 +135,66 @@ var blockTags = map[string]struct{}{ | ||||
| 	"video":      {}, | ||||
| } | ||||
| 
 | ||||
| // Renderer is the rendering interface. | ||||
| // This is mostly of interest if you are implementing a new rendering format. | ||||
| // Renderer is the rendering interface. This is mostly of interest if you are | ||||
| // implementing a new rendering format. | ||||
| // | ||||
| // When a byte slice is provided, it contains the (rendered) contents of the | ||||
| // element. | ||||
| // | ||||
| // When a callback is provided instead, it will write the contents of the | ||||
| // respective element directly to the output buffer and return true on success. | ||||
| // If the callback returns false, the rendering function should reset the | ||||
| // output buffer as though it had never been called. | ||||
| // | ||||
| // Currently Html and Latex implementations are provided | ||||
| // Only an HTML implementation is provided in this repository, see the README | ||||
| // for external implementations. | ||||
| type Renderer interface { | ||||
| 	// block-level callbacks | ||||
| 	BlockCode(out *bytes.Buffer, text []byte, infoString string) | ||||
| 	BlockQuote(out *bytes.Buffer, text []byte) | ||||
| 	BlockHtml(out *bytes.Buffer, text []byte) | ||||
| 	Header(out *bytes.Buffer, text func() bool, level int, id string) | ||||
| 	HRule(out *bytes.Buffer) | ||||
| 	List(out *bytes.Buffer, text func() bool, flags int) | ||||
| 	ListItem(out *bytes.Buffer, text []byte, flags int) | ||||
| 	Paragraph(out *bytes.Buffer, text func() bool) | ||||
| 	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) | ||||
| 	TableRow(out *bytes.Buffer, text []byte) | ||||
| 	TableHeaderCell(out *bytes.Buffer, text []byte, flags int) | ||||
| 	TableCell(out *bytes.Buffer, text []byte, flags int) | ||||
| 	Footnotes(out *bytes.Buffer, text func() bool) | ||||
| 	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) | ||||
| 	TitleBlock(out *bytes.Buffer, text []byte) | ||||
| 	// RenderNode is the main rendering method. It will be called once for | ||||
| 	// every leaf node and twice for every non-leaf node (first with | ||||
| 	// entering=true, then with entering=false). The method should write its | ||||
| 	// rendition of the node to the supplied writer w. | ||||
| 	RenderNode(w io.Writer, node *Node, entering bool) WalkStatus | ||||
| 
 | ||||
| 	// Span-level callbacks | ||||
| 	AutoLink(out *bytes.Buffer, link []byte, kind int) | ||||
| 	CodeSpan(out *bytes.Buffer, text []byte) | ||||
| 	DoubleEmphasis(out *bytes.Buffer, text []byte) | ||||
| 	Emphasis(out *bytes.Buffer, text []byte) | ||||
| 	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) | ||||
| 	LineBreak(out *bytes.Buffer) | ||||
| 	Link(out *bytes.Buffer, link []byte, title []byte, content []byte) | ||||
| 	RawHtmlTag(out *bytes.Buffer, tag []byte) | ||||
| 	TripleEmphasis(out *bytes.Buffer, text []byte) | ||||
| 	StrikeThrough(out *bytes.Buffer, text []byte) | ||||
| 	FootnoteRef(out *bytes.Buffer, ref []byte, id int) | ||||
| 	// RenderHeader is a method that allows the renderer to produce some | ||||
| 	// content preceding the main body of the output document. The header is | ||||
| 	// understood in the broad sense here. For example, the default HTML | ||||
| 	// renderer will write not only the HTML document preamble, but also the | ||||
| 	// table of contents if it was requested. | ||||
| 	// | ||||
| 	// The method will be passed an entire document tree, in case a particular | ||||
| 	// implementation needs to inspect it to produce output. | ||||
| 	// | ||||
| 	// The output should be written to the supplied writer w. If your | ||||
| 	// implementation has no header to write, supply an empty implementation. | ||||
| 	RenderHeader(w io.Writer, ast *Node) | ||||
| 
 | ||||
| 	// Low-level callbacks | ||||
| 	Entity(out *bytes.Buffer, entity []byte) | ||||
| 	NormalText(out *bytes.Buffer, text []byte) | ||||
| 
 | ||||
| 	// Header and footer | ||||
| 	DocumentHeader(out *bytes.Buffer) | ||||
| 	DocumentFooter(out *bytes.Buffer) | ||||
| 
 | ||||
| 	GetFlags() int | ||||
| 	// RenderFooter is a symmetric counterpart of RenderHeader. | ||||
| 	RenderFooter(w io.Writer, ast *Node) | ||||
| } | ||||
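Implementing this interface usually means wrapping the stock HTML renderer and overriding a handful of node types. A minimal sketch under that assumption; the hrRenderer name and the content-divider class are invented for illustration:

package main

import (
	"fmt"
	"io"

	"github.com/russross/blackfriday/v2"
)

// hrRenderer delegates everything to the built-in HTML renderer except
// horizontal rules, which it renders with a custom class.
type hrRenderer struct {
	html *blackfriday.HTMLRenderer
}

func (r *hrRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
	if node.Type == blackfriday.HorizontalRule {
		io.WriteString(w, "<hr class=\"content-divider\"/>\n")
		return blackfriday.GoToNext
	}
	return r.html.RenderNode(w, node, entering)
}

func (r *hrRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { r.html.RenderHeader(w, ast) }

func (r *hrRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { r.html.RenderFooter(w, ast) }

func main() {
	renderer := &hrRenderer{html: blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Flags: blackfriday.CommonHTMLFlags,
	})}
	out := blackfriday.Run([]byte("above\n\n---\n\nbelow\n"), blackfriday.WithRenderer(renderer))
	fmt.Printf("%s", out)
}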
| 
 | ||||
| // Callback functions for inline parsing. One such function is defined | ||||
| // for each character that triggers a response when parsing inline data. | ||||
| type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int | ||||
| type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) | ||||
| 
 | ||||
| // Parser holds runtime state used by the parser. | ||||
| // This is constructed by the Markdown function. | ||||
| type parser struct { | ||||
| 	r              Renderer | ||||
| 	refOverride    ReferenceOverrideFunc | ||||
| 	refs           map[string]*reference | ||||
| 	inlineCallback [256]inlineParser | ||||
| 	flags          int | ||||
| 	nesting        int | ||||
| 	maxNesting     int | ||||
| 	insideLink     bool | ||||
| // Markdown is a type that holds extensions and the runtime state used by | ||||
| // Parse, and the renderer. You cannot use it directly; construct it with New. | ||||
| type Markdown struct { | ||||
| 	renderer          Renderer | ||||
| 	referenceOverride ReferenceOverrideFunc | ||||
| 	refs              map[string]*reference | ||||
| 	inlineCallback    [256]inlineParser | ||||
| 	extensions        Extensions | ||||
| 	nesting           int | ||||
| 	maxNesting        int | ||||
| 	insideLink        bool | ||||
| 
 | ||||
| 	// Footnotes need to be ordered as well as available to quickly check for | ||||
| 	// presence. If a ref is also a footnote, it's stored both in refs and here | ||||
| 	// in notes. Slice is nil if footnotes not enabled. | ||||
| 	notes       []*reference | ||||
| 	notesRecord map[string]struct{} | ||||
| 	notes []*reference | ||||
| 
 | ||||
| 	doc                  *Node | ||||
| 	tip                  *Node // = doc | ||||
| 	oldTip               *Node | ||||
| 	lastMatchedContainer *Node // = doc | ||||
| 	allClosed            bool | ||||
| } | ||||
| 
 | ||||
| func (p *parser) getRef(refid string) (ref *reference, found bool) { | ||||
| 	if p.refOverride != nil { | ||||
| 		r, overridden := p.refOverride(refid) | ||||
| func (p *Markdown) getRef(refid string) (ref *reference, found bool) { | ||||
| 	if p.referenceOverride != nil { | ||||
| 		r, overridden := p.referenceOverride(refid) | ||||
| 		if overridden { | ||||
| 			if r == nil { | ||||
| 				return nil, false | ||||
| @@ -232,7 +202,7 @@ func (p *parser) getRef(refid string) (ref *reference, found bool) { | ||||
| 			return &reference{ | ||||
| 				link:     []byte(r.Link), | ||||
| 				title:    []byte(r.Title), | ||||
| 				noteId:   0, | ||||
| 				noteID:   0, | ||||
| 				hasBlock: false, | ||||
| 				text:     []byte(r.Text)}, true | ||||
| 		} | ||||
| @@ -242,9 +212,34 @@ func (p *parser) getRef(refid string) (ref *reference, found bool) { | ||||
| 	return ref, found | ||||
| } | ||||
| 
 | ||||
| func (p *parser) isFootnote(ref *reference) bool { | ||||
| 	_, ok := p.notesRecord[string(ref.link)] | ||||
| 	return ok | ||||
| func (p *Markdown) finalize(block *Node) { | ||||
| 	above := block.Parent | ||||
| 	block.open = false | ||||
| 	p.tip = above | ||||
| } | ||||
| 
 | ||||
| func (p *Markdown) addChild(node NodeType, offset uint32) *Node { | ||||
| 	return p.addExistingChild(NewNode(node), offset) | ||||
| } | ||||
| 
 | ||||
| func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { | ||||
| 	for !p.tip.canContain(node.Type) { | ||||
| 		p.finalize(p.tip) | ||||
| 	} | ||||
| 	p.tip.AppendChild(node) | ||||
| 	p.tip = node | ||||
| 	return node | ||||
| } | ||||
| 
 | ||||
| func (p *Markdown) closeUnmatchedBlocks() { | ||||
| 	if !p.allClosed { | ||||
| 		for p.oldTip != p.lastMatchedContainer { | ||||
| 			parent := p.oldTip.Parent | ||||
| 			p.finalize(p.oldTip) | ||||
| 			p.oldTip = parent | ||||
| 		} | ||||
| 		p.allClosed = true | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // | ||||
| @@ -271,102 +266,27 @@ type Reference struct { | ||||
| // See the documentation in Options for more details on use-case. | ||||
| type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) | ||||
| 
 | ||||
| // Options represents configurable overrides and callbacks (in addition to the | ||||
| // extension flag set) for configuring a Markdown parse. | ||||
| type Options struct { | ||||
| 	// Extensions is a flag set of bit-wise ORed extension bits. See the | ||||
| 	// EXTENSION_* flags defined in this package. | ||||
| 	Extensions int | ||||
| 
 | ||||
| 	// ReferenceOverride is an optional function callback that is called every | ||||
| 	// time a reference is resolved. | ||||
| 	// | ||||
| 	// In Markdown, the link reference syntax can be made to resolve a link to | ||||
| 	// a reference instead of an inline URL, in one of the following ways: | ||||
| 	// | ||||
| 	//  * [link text][refid] | ||||
| 	//  * [refid][] | ||||
| 	// | ||||
| 	// Usually, the refid is defined at the bottom of the Markdown document. If | ||||
| 	// this override function is provided, the refid is passed to the override | ||||
| 	// function first, before consulting the defined refids at the bottom. If | ||||
| 	// the override function indicates an override did not occur, the refids at | ||||
| 	// the bottom will be used to fill in the link details. | ||||
| 	ReferenceOverride ReferenceOverrideFunc | ||||
| } | ||||
| 
 | ||||
| // MarkdownBasic is a convenience function for simple rendering. | ||||
| // It processes markdown input with no extensions enabled. | ||||
| func MarkdownBasic(input []byte) []byte { | ||||
| 	// set up the HTML renderer | ||||
| 	htmlFlags := HTML_USE_XHTML | ||||
| 	renderer := HtmlRenderer(htmlFlags, "", "") | ||||
| 
 | ||||
| 	// set up the parser | ||||
| 	return MarkdownOptions(input, renderer, Options{Extensions: 0}) | ||||
| } | ||||
| 
 | ||||
| // Call Markdown with most useful extensions enabled | ||||
| // MarkdownCommon is a convenience function for simple rendering. | ||||
| // It processes markdown input with common extensions enabled, including: | ||||
| // | ||||
| // * Smartypants processing with smart fractions and LaTeX dashes | ||||
| // | ||||
| // * Intra-word emphasis suppression | ||||
| // | ||||
| // * Tables | ||||
| // | ||||
| // * Fenced code blocks | ||||
| // | ||||
| // * Autolinking | ||||
| // | ||||
| // * Strikethrough support | ||||
| // | ||||
| // * Strict header parsing | ||||
| // | ||||
| // * Custom Header IDs | ||||
| func MarkdownCommon(input []byte) []byte { | ||||
| 	// set up the HTML renderer | ||||
| 	renderer := HtmlRenderer(commonHtmlFlags, "", "") | ||||
| 	return MarkdownOptions(input, renderer, Options{ | ||||
| 		Extensions: commonExtensions}) | ||||
| } | ||||
| 
 | ||||
| // Markdown is the main rendering function. | ||||
| // It parses and renders a block of markdown-encoded text. | ||||
| // The supplied Renderer is used to format the output, and extensions dictates | ||||
| // which non-standard extensions are enabled. | ||||
| // | ||||
| // To use the supplied Html or LaTeX renderers, see HtmlRenderer and | ||||
| // LatexRenderer, respectively. | ||||
| func Markdown(input []byte, renderer Renderer, extensions int) []byte { | ||||
| 	return MarkdownOptions(input, renderer, Options{ | ||||
| 		Extensions: extensions}) | ||||
| } | ||||
| 
 | ||||
| // MarkdownOptions is just like Markdown but takes additional options through | ||||
| // the Options struct. | ||||
| func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte { | ||||
| 	// no point in parsing if we can't render | ||||
| 	if renderer == nil { | ||||
| 		return nil | ||||
| // New constructs a Markdown processor. You can use the same With* functions as | ||||
| // for Run() to customize the parser's behavior and the renderer. | ||||
| func New(opts ...Option) *Markdown { | ||||
| 	var p Markdown | ||||
| 	for _, opt := range opts { | ||||
| 		opt(&p) | ||||
| 	} | ||||
| 
 | ||||
| 	extensions := opts.Extensions | ||||
| 
 | ||||
| 	// fill in the render structure | ||||
| 	p := new(parser) | ||||
| 	p.r = renderer | ||||
| 	p.flags = extensions | ||||
| 	p.refOverride = opts.ReferenceOverride | ||||
| 	p.refs = make(map[string]*reference) | ||||
| 	p.maxNesting = 16 | ||||
| 	p.insideLink = false | ||||
| 
 | ||||
| 	docNode := NewNode(Document) | ||||
| 	p.doc = docNode | ||||
| 	p.tip = docNode | ||||
| 	p.oldTip = docNode | ||||
| 	p.lastMatchedContainer = docNode | ||||
| 	p.allClosed = true | ||||
| 	// register inline parsers | ||||
| 	p.inlineCallback[' '] = maybeLineBreak | ||||
| 	p.inlineCallback['*'] = emphasis | ||||
| 	p.inlineCallback['_'] = emphasis | ||||
| 	if extensions&EXTENSION_STRIKETHROUGH != 0 { | ||||
| 	if p.extensions&Strikethrough != 0 { | ||||
| 		p.inlineCallback['~'] = emphasis | ||||
| 	} | ||||
| 	p.inlineCallback['`'] = codeSpan | ||||
| @@ -375,116 +295,166 @@ func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte { | ||||
| 	p.inlineCallback['<'] = leftAngle | ||||
| 	p.inlineCallback['\\'] = escape | ||||
| 	p.inlineCallback['&'] = entity | ||||
| 
 | ||||
| 	if extensions&EXTENSION_AUTOLINK != 0 { | ||||
| 		p.inlineCallback[':'] = autoLink | ||||
| 	p.inlineCallback['!'] = maybeImage | ||||
| 	p.inlineCallback['^'] = maybeInlineFootnote | ||||
| 	if p.extensions&Autolink != 0 { | ||||
| 		p.inlineCallback['h'] = maybeAutoLink | ||||
| 		p.inlineCallback['m'] = maybeAutoLink | ||||
| 		p.inlineCallback['f'] = maybeAutoLink | ||||
| 		p.inlineCallback['H'] = maybeAutoLink | ||||
| 		p.inlineCallback['M'] = maybeAutoLink | ||||
| 		p.inlineCallback['F'] = maybeAutoLink | ||||
| 	} | ||||
| 
 | ||||
| 	if extensions&EXTENSION_FOOTNOTES != 0 { | ||||
| 	if p.extensions&Footnotes != 0 { | ||||
| 		p.notes = make([]*reference, 0) | ||||
| 		p.notesRecord = make(map[string]struct{}) | ||||
| 	} | ||||
| 
 | ||||
| 	first := firstPass(p, input) | ||||
| 	second := secondPass(p, first) | ||||
| 	return second | ||||
| 	return &p | ||||
| } | ||||
| 
 | ||||
| // first pass: | ||||
| // - normalize newlines | ||||
| // - extract references (outside of fenced code blocks) | ||||
| // - expand tabs (outside of fenced code blocks) | ||||
| // - copy everything else | ||||
| func firstPass(p *parser, input []byte) []byte { | ||||
| 	var out bytes.Buffer | ||||
| 	tabSize := TAB_SIZE_DEFAULT | ||||
| 	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 { | ||||
| 		tabSize = TAB_SIZE_EIGHT | ||||
| // Option customizes the Markdown processor's default behavior. | ||||
| type Option func(*Markdown) | ||||
| 
 | ||||
| // WithRenderer allows you to override the default renderer. | ||||
| func WithRenderer(r Renderer) Option { | ||||
| 	return func(p *Markdown) { | ||||
| 		p.renderer = r | ||||
| 	} | ||||
| 	beg := 0 | ||||
| 	lastFencedCodeBlockEnd := 0 | ||||
| 	for beg < len(input) { | ||||
| 		// Find end of this line, then process the line. | ||||
| 		end := beg | ||||
| 		for end < len(input) && input[end] != '\n' && input[end] != '\r' { | ||||
| 			end++ | ||||
| 		} | ||||
| 
 | ||||
| 		if p.flags&EXTENSION_FENCED_CODE != 0 { | ||||
| 			// track fenced code block boundaries to suppress tab expansion | ||||
| 			// and reference extraction inside them: | ||||
| 			if beg >= lastFencedCodeBlockEnd { | ||||
| 				if i := p.fencedCodeBlock(&out, input[beg:], false); i > 0 { | ||||
| 					lastFencedCodeBlockEnd = beg + i | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		// add the line body if present | ||||
| 		if end > beg { | ||||
| 			if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks. | ||||
| 				out.Write(input[beg:end]) | ||||
| 			} else if refEnd := isReference(p, input[beg:], tabSize); refEnd > 0 { | ||||
| 				beg += refEnd | ||||
| 				continue | ||||
| 			} else { | ||||
| 				expandTabs(&out, input[beg:end], tabSize) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		if end < len(input) && input[end] == '\r' { | ||||
| 			end++ | ||||
| 		} | ||||
| 		if end < len(input) && input[end] == '\n' { | ||||
| 			end++ | ||||
| 		} | ||||
| 		out.WriteByte('\n') | ||||
| 
 | ||||
| 		beg = end | ||||
| 	} | ||||
| 
 | ||||
| 	// empty input? | ||||
| 	if out.Len() == 0 { | ||||
| 		out.WriteByte('\n') | ||||
| 	} | ||||
| 
 | ||||
| 	return out.Bytes() | ||||
| } | ||||
| 
 | ||||
| // second pass: actual rendering | ||||
| func secondPass(p *parser, input []byte) []byte { | ||||
| 	var output bytes.Buffer | ||||
| // WithExtensions allows you to pick some of the many extensions provided by | ||||
| // Blackfriday. You can bitwise OR them. | ||||
| func WithExtensions(e Extensions) Option { | ||||
| 	return func(p *Markdown) { | ||||
| 		p.extensions = e | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| 	p.r.DocumentHeader(&output) | ||||
| 	p.block(&output, input) | ||||
| 
 | ||||
| 	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 { | ||||
| 		p.r.Footnotes(&output, func() bool { | ||||
| 			flags := LIST_ITEM_BEGINNING_OF_LIST | ||||
| 			for i := 0; i < len(p.notes); i += 1 { | ||||
| 				ref := p.notes[i] | ||||
| 				var buf bytes.Buffer | ||||
| 				if ref.hasBlock { | ||||
| 					flags |= LIST_ITEM_CONTAINS_BLOCK | ||||
| 					p.block(&buf, ref.title) | ||||
| 				} else { | ||||
| 					p.inline(&buf, ref.title) | ||||
| 				} | ||||
| 				p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags) | ||||
| 				flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK | ||||
| 			} | ||||
| 
 | ||||
| 			return true | ||||
| // WithNoExtensions turns off all extensions and custom behavior. | ||||
| func WithNoExtensions() Option { | ||||
| 	return func(p *Markdown) { | ||||
| 		p.extensions = NoExtensions | ||||
| 		p.renderer = NewHTMLRenderer(HTMLRendererParameters{ | ||||
| 			Flags: HTMLFlagsNone, | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| 	p.r.DocumentFooter(&output) | ||||
| 
 | ||||
| 	if p.nesting != 0 { | ||||
| 		panic("Nesting level did not end at zero") | ||||
| // WithRefOverride sets an optional function callback that is called every | ||||
| // time a reference is resolved. | ||||
| // | ||||
| // In Markdown, the link reference syntax can be made to resolve a link to | ||||
| // a reference instead of an inline URL, in one of the following ways: | ||||
| // | ||||
| //  * [link text][refid] | ||||
| //  * [refid][] | ||||
| // | ||||
| // Usually, the refid is defined at the bottom of the Markdown document. If | ||||
| // this override function is provided, the refid is passed to the override | ||||
| // function first, before consulting the defined refids at the bottom. If | ||||
| // the override function indicates an override did not occur, the refids at | ||||
| // the bottom will be used to fill in the link details. | ||||
| func WithRefOverride(o ReferenceOverrideFunc) Option { | ||||
| 	return func(p *Markdown) { | ||||
| 		p.referenceOverride = o | ||||
| 	} | ||||
| } | ||||
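A hypothetical use of WithRefOverride: the "wiki:" prefix, the example.com URL, and the input text below are invented, and unknown refids fall back to the definitions at the bottom of the document:

package main

import (
	"fmt"
	"strings"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Resolve refids carrying an invented "wiki:" prefix to an external URL;
	// anything else is left to the refids defined in the document.
	override := func(refid string) (*blackfriday.Reference, bool) {
		if strings.HasPrefix(refid, "wiki:") {
			page := strings.TrimPrefix(refid, "wiki:")
			return &blackfriday.Reference{
				Link:  "https://example.com/wiki/" + page,
				Title: "wiki page " + page,
				Text:  page,
			}, true
		}
		return nil, false
	}

	input := []byte("See [the build notes][wiki:build] and [the spec][spec].\n\n" +
		"[spec]: https://spec.commonmark.org/\n")
	fmt.Printf("%s", blackfriday.Run(input, blackfriday.WithRefOverride(override)))
}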
| 
 | ||||
| 	return output.Bytes() | ||||
| // Run is the main entry point to Blackfriday. It parses and renders a | ||||
| // block of markdown-encoded text. | ||||
| // | ||||
| // The simplest invocation of Run takes one argument, input: | ||||
| //     output := Run(input) | ||||
| // This will parse the input with CommonExtensions enabled and render it with | ||||
| // the default HTMLRenderer (with CommonHTMLFlags). | ||||
| // | ||||
| // Variadic arguments opts can customize the default behavior. Since the | ||||
| // Markdown type does not contain exported fields, you cannot use it directly. Instead, | ||||
| // use the With* functions. For example, this will call the most basic | ||||
| // functionality, with no extensions: | ||||
| //     output := Run(input, WithNoExtensions()) | ||||
| // | ||||
| // You can use any number of With* arguments, even contradicting ones. They | ||||
| // will be applied in order of appearance and the latter will override the | ||||
| // former: | ||||
| //     output := Run(input, WithNoExtensions(), WithExtensions(exts), | ||||
| //         WithRenderer(yourRenderer)) | ||||
| func Run(input []byte, opts ...Option) []byte { | ||||
| 	r := NewHTMLRenderer(HTMLRendererParameters{ | ||||
| 		Flags: CommonHTMLFlags, | ||||
| 	}) | ||||
| 	optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} | ||||
| 	optList = append(optList, opts...) | ||||
| 	parser := New(optList...) | ||||
| 	ast := parser.Parse(input) | ||||
| 	var buf bytes.Buffer | ||||
| 	parser.renderer.RenderHeader(&buf, ast) | ||||
| 	ast.Walk(func(node *Node, entering bool) WalkStatus { | ||||
| 		return parser.renderer.RenderNode(&buf, node, entering) | ||||
| 	}) | ||||
| 	parser.renderer.RenderFooter(&buf, ast) | ||||
| 	return buf.Bytes() | ||||
| } | ||||
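A runnable counterpart to the comment above (the input text is invented):

package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("# Notes\n\nA ~~struck~~ word and a short paragraph.\n")

	// Defaults: CommonExtensions plus the stock HTML renderer.
	fmt.Printf("%s", blackfriday.Run(input))

	// Later options win: this keeps the plain renderer installed by
	// WithNoExtensions but re-enables just the Strikethrough extension.
	fmt.Printf("%s", blackfriday.Run(input,
		blackfriday.WithNoExtensions(),
		blackfriday.WithExtensions(blackfriday.Strikethrough)))
}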
| 
 | ||||
| // Parse is an entry point to the parsing part of Blackfriday. It takes an | ||||
| // input markdown document and produces a syntax tree for its contents. This | ||||
| // tree can then be rendered with a default or custom renderer, or | ||||
| // analyzed/transformed by the caller to whatever non-standard needs they have. | ||||
| // The return value is the root node of the syntax tree. | ||||
| func (p *Markdown) Parse(input []byte) *Node { | ||||
| 	p.block(input) | ||||
| 	// Walk the tree and finish up any unfinished blocks | ||||
| 	for p.tip != nil { | ||||
| 		p.finalize(p.tip) | ||||
| 	} | ||||
| 	// Walk the tree again and process inline markdown in each block | ||||
| 	p.doc.Walk(func(node *Node, entering bool) WalkStatus { | ||||
| 		if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { | ||||
| 			p.inline(node, node.content) | ||||
| 			node.content = nil | ||||
| 		} | ||||
| 		return GoToNext | ||||
| 	}) | ||||
| 	p.parseRefsToAST() | ||||
| 	return p.doc | ||||
| } | ||||
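A small sketch of driving Parse directly and inspecting the tree instead of rendering it (the input is invented):

package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse([]byte("# One\n\nSome text.\n\n## Two\n"))

	// Collect heading levels without rendering anything.
	var levels []int
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Heading {
			levels = append(levels, node.HeadingData.Level)
		}
		return blackfriday.GoToNext
	})
	fmt.Println(levels) // [1 2]
}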
| 
 | ||||
| func (p *Markdown) parseRefsToAST() { | ||||
| 	if p.extensions&Footnotes == 0 || len(p.notes) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	p.tip = p.doc | ||||
| 	block := p.addBlock(List, nil) | ||||
| 	block.IsFootnotesList = true | ||||
| 	block.ListFlags = ListTypeOrdered | ||||
| 	flags := ListItemBeginningOfList | ||||
| 	// Note: this loop is intentionally explicit, not range-form. This is | ||||
| 	// because the body of the loop will append nested footnotes to p.notes and | ||||
| 	// we need to process those late additions. Range form would only walk over | ||||
| 	// the fixed initial set. | ||||
| 	for i := 0; i < len(p.notes); i++ { | ||||
| 		ref := p.notes[i] | ||||
| 		p.addExistingChild(ref.footnote, 0) | ||||
| 		block := ref.footnote | ||||
| 		block.ListFlags = flags | ListTypeOrdered | ||||
| 		block.RefLink = ref.link | ||||
| 		if ref.hasBlock { | ||||
| 			flags |= ListItemContainsBlock | ||||
| 			p.block(ref.title) | ||||
| 		} else { | ||||
| 			p.inline(block, ref.title) | ||||
| 		} | ||||
| 		flags &^= ListItemBeginningOfList | ListItemContainsBlock | ||||
| 	} | ||||
| 	above := block.Parent | ||||
| 	finalizeList(block) | ||||
| 	p.tip = above | ||||
| 	block.Walk(func(node *Node, entering bool) WalkStatus { | ||||
| 		if node.Type == Paragraph || node.Type == Heading { | ||||
| 			p.inline(node, node.content) | ||||
| 			node.content = nil | ||||
| 		} | ||||
| 		return GoToNext | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
| // | ||||
| @@ -510,24 +480,62 @@ func secondPass(p *parser, input []byte) []byte { | ||||
| //    [^note]: This is the explanation. | ||||
| // | ||||
| // Footnotes should be placed at the end of the document in an ordered list. | ||||
| // Inline footnotes such as: | ||||
| // Finally, there are inline footnotes such as: | ||||
| // | ||||
| //    Inline footnotes^[Not supported.] also exist. | ||||
| //    Inline footnotes^[Also supported.] provide a quick inline explanation, | ||||
| //    but are rendered at the bottom of the document. | ||||
| // | ||||
| // are not yet supported. | ||||
| 
 | ||||
| // References are parsed and stored in this struct. | ||||
| // reference holds all information necessary for reference-style links or | ||||
| // footnotes. | ||||
| // | ||||
| // Consider this markdown with reference-style links: | ||||
| // | ||||
| //     [link][ref] | ||||
| // | ||||
| //     [ref]: /url/ "tooltip title" | ||||
| // | ||||
| // It will be ultimately converted to this HTML: | ||||
| // | ||||
| //     <p><a href="/url/" title="tooltip title">link</a></p> | ||||
| // | ||||
| // And a reference structure will be populated as follows: | ||||
| // | ||||
| //     p.refs["ref"] = &reference{ | ||||
| //         link: "/url/", | ||||
| //         title: "tooltip title", | ||||
| //     } | ||||
| // | ||||
| // Alternatively, reference can contain information about a footnote. Consider | ||||
| // this markdown: | ||||
| // | ||||
| //     Text needing a footnote.[^a] | ||||
| // | ||||
| //     [^a]: This is the note | ||||
| // | ||||
| // A reference structure will be populated as follows: | ||||
| // | ||||
| //     p.refs["a"] = &reference{ | ||||
| //         link: "a", | ||||
| //         title: "This is the note", | ||||
| //         noteID: <some positive int>, | ||||
| //     } | ||||
| // | ||||
| // TODO: As you can see, it begs for splitting into two dedicated structures | ||||
| // for refs and for footnotes. | ||||
| type reference struct { | ||||
| 	link     []byte | ||||
| 	title    []byte | ||||
| 	noteId   int // 0 if not a footnote ref | ||||
| 	noteID   int // 0 if not a footnote ref | ||||
| 	hasBlock bool | ||||
| 	text     []byte | ||||
| 	footnote *Node // a link to the Item node within a list of footnotes | ||||
| 
 | ||||
| 	text []byte // only gets populated by refOverride feature with Reference.Text | ||||
| } | ||||
| 
 | ||||
| func (r *reference) String() string { | ||||
| 	return fmt.Sprintf("{link: %q, title: %q, text: %q, noteId: %d, hasBlock: %v}", | ||||
| 		r.link, r.title, r.text, r.noteId, r.hasBlock) | ||||
| 	return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", | ||||
| 		r.link, r.title, r.text, r.noteID, r.hasBlock) | ||||
| } | ||||
| 
 | ||||
| // Check whether or not data starts with a reference link. | ||||
| @@ -535,7 +543,7 @@ func (r *reference) String() string { | ||||
| // (in the render struct). | ||||
| // Returns the number of bytes to skip to move past it, | ||||
| // or zero if the first line is not a reference. | ||||
| func isReference(p *parser, data []byte, tabSize int) int { | ||||
| func isReference(p *Markdown, data []byte, tabSize int) int { | ||||
| 	// up to 3 optional leading spaces | ||||
| 	if len(data) < 4 { | ||||
| 		return 0 | ||||
| @@ -545,18 +553,18 @@ func isReference(p *parser, data []byte, tabSize int) int { | ||||
| 		i++ | ||||
| 	} | ||||
| 
 | ||||
| 	noteId := 0 | ||||
| 	noteID := 0 | ||||
| 
 | ||||
| 	// id part: anything but a newline between brackets | ||||
| 	if data[i] != '[' { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	i++ | ||||
| 	if p.flags&EXTENSION_FOOTNOTES != 0 { | ||||
| 	if p.extensions&Footnotes != 0 { | ||||
| 		if i < len(data) && data[i] == '^' { | ||||
| 			// we can set it to anything here because the proper noteIds will | ||||
| 			// be assigned later during the second pass. It just has to be != 0 | ||||
| 			noteId = 1 | ||||
| 			noteID = 1 | ||||
| 			i++ | ||||
| 		} | ||||
| 	} | ||||
| @@ -568,7 +576,11 @@ func isReference(p *parser, data []byte, tabSize int) int { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	idEnd := i | ||||
| 
 | ||||
| 	// footnotes can have empty ID, like this: [^], but a reference can not be | ||||
| 	// empty like this: []. Break early if it's not a footnote and there's no ID | ||||
| 	if noteID == 0 && idOffset == idEnd { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	// spacer: colon (space | tab)* newline? (space | tab)* | ||||
| 	i++ | ||||
| 	if i >= len(data) || data[i] != ':' { | ||||
| @@ -599,7 +611,7 @@ func isReference(p *parser, data []byte, tabSize int) int { | ||||
| 		hasBlock              bool | ||||
| 	) | ||||
| 
 | ||||
| 	if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 { | ||||
| 	if p.extensions&Footnotes != 0 && noteID != 0 { | ||||
| 		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) | ||||
| 		lineEnd = linkEnd | ||||
| 	} else { | ||||
| @@ -612,11 +624,11 @@ func isReference(p *parser, data []byte, tabSize int) int { | ||||
| 	// a valid ref has been found | ||||
| 
 | ||||
| 	ref := &reference{ | ||||
| 		noteId:   noteId, | ||||
| 		noteID:   noteID, | ||||
| 		hasBlock: hasBlock, | ||||
| 	} | ||||
| 
 | ||||
| 	if noteId > 0 { | ||||
| 	if noteID > 0 { | ||||
| 		// reusing the link field for the id since footnotes don't have links | ||||
| 		ref.link = data[idOffset:idEnd] | ||||
| 		// if footnote, it's not really a title, it's the contained text | ||||
| @@ -634,15 +646,12 @@ func isReference(p *parser, data []byte, tabSize int) int { | ||||
| 	return lineEnd | ||||
| } | ||||
| 
 | ||||
| func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { | ||||
| func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { | ||||
| 	// link: whitespace-free sequence, optionally between angle brackets | ||||
| 	if data[i] == '<' { | ||||
| 		i++ | ||||
| 	} | ||||
| 	linkOffset = i | ||||
| 	if i == len(data) { | ||||
| 		return | ||||
| 	} | ||||
| 	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { | ||||
| 		i++ | ||||
| 	} | ||||
| @@ -705,13 +714,13 @@ func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffse | ||||
| 	return | ||||
| } | ||||
| 
 | ||||
| // The first bit of this logic is the same as (*parser).listItem, but the rest | ||||
| // The first bit of this logic is the same as Parser.listItem, but the rest | ||||
| // is much simpler. This function simply finds the entire block and shifts it | ||||
| // over by one tab if it is indeed a block (just returns the line if it's not). | ||||
| // blockEnd is the end of the section in the input buffer, and contents is the | ||||
| // extracted text that was shifted over one tab. It will need to be rendered at | ||||
| // the end of the document. | ||||
| func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { | ||||
| func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { | ||||
| 	if i == 0 || len(data) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| @@ -812,7 +821,7 @@ func ishorizontalspace(c byte) bool { | ||||
| 	return c == ' ' || c == '\t' | ||||
| } | ||||
| 
 | ||||
| // Test if a character is a vertical whitespace character. | ||||
| // Test if a character is a vertical character. | ||||
| func isverticalspace(c byte) bool { | ||||
| 	return c == '\n' || c == '\r' || c == '\f' || c == '\v' | ||||
| } | ||||
							
								
								
									
354 vendor/github.com/russross/blackfriday/v2/node.go generated vendored Normal file
							| @@ -0,0 +1,354 @@ | ||||
| package blackfriday | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| ) | ||||
|  | ||||
| // NodeType specifies a type of a single node of a syntax tree. Usually one | ||||
| // node (and its type) corresponds to a single markdown feature, e.g. emphasis | ||||
| // or code block. | ||||
| type NodeType int | ||||
|  | ||||
| // Constants for identifying different types of nodes. See NodeType. | ||||
| const ( | ||||
| 	Document NodeType = iota | ||||
| 	BlockQuote | ||||
| 	List | ||||
| 	Item | ||||
| 	Paragraph | ||||
| 	Heading | ||||
| 	HorizontalRule | ||||
| 	Emph | ||||
| 	Strong | ||||
| 	Del | ||||
| 	Link | ||||
| 	Image | ||||
| 	Text | ||||
| 	HTMLBlock | ||||
| 	CodeBlock | ||||
| 	Softbreak | ||||
| 	Hardbreak | ||||
| 	Code | ||||
| 	HTMLSpan | ||||
| 	Table | ||||
| 	TableCell | ||||
| 	TableHead | ||||
| 	TableBody | ||||
| 	TableRow | ||||
| ) | ||||
|  | ||||
| var nodeTypeNames = []string{ | ||||
| 	Document:       "Document", | ||||
| 	BlockQuote:     "BlockQuote", | ||||
| 	List:           "List", | ||||
| 	Item:           "Item", | ||||
| 	Paragraph:      "Paragraph", | ||||
| 	Heading:        "Heading", | ||||
| 	HorizontalRule: "HorizontalRule", | ||||
| 	Emph:           "Emph", | ||||
| 	Strong:         "Strong", | ||||
| 	Del:            "Del", | ||||
| 	Link:           "Link", | ||||
| 	Image:          "Image", | ||||
| 	Text:           "Text", | ||||
| 	HTMLBlock:      "HTMLBlock", | ||||
| 	CodeBlock:      "CodeBlock", | ||||
| 	Softbreak:      "Softbreak", | ||||
| 	Hardbreak:      "Hardbreak", | ||||
| 	Code:           "Code", | ||||
| 	HTMLSpan:       "HTMLSpan", | ||||
| 	Table:          "Table", | ||||
| 	TableCell:      "TableCell", | ||||
| 	TableHead:      "TableHead", | ||||
| 	TableBody:      "TableBody", | ||||
| 	TableRow:       "TableRow", | ||||
| } | ||||
|  | ||||
| func (t NodeType) String() string { | ||||
| 	return nodeTypeNames[t] | ||||
| } | ||||
|  | ||||
| // ListData contains fields relevant to a List and Item node type. | ||||
| type ListData struct { | ||||
| 	ListFlags       ListType | ||||
| 	Tight           bool   // Skip <p>s around list item data if true | ||||
| 	BulletChar      byte   // '*', '+' or '-' in bullet lists | ||||
| 	Delimiter       byte   // '.' or ')' after the number in ordered lists | ||||
| 	RefLink         []byte // If not nil, turns this list item into a footnote item and triggers different rendering | ||||
| 	IsFootnotesList bool   // This is a list of footnotes | ||||
| } | ||||
|  | ||||
| // LinkData contains fields relevant to a Link node type. | ||||
| type LinkData struct { | ||||
| 	Destination []byte // Destination is what goes into a href | ||||
| 	Title       []byte // Title is the tooltip thing that goes in a title attribute | ||||
| 	NoteID      int    // NoteID contains a serial number of a footnote, zero if it's not a footnote | ||||
| 	Footnote    *Node  // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. | ||||
| } | ||||
|  | ||||
| // CodeBlockData contains fields relevant to a CodeBlock node type. | ||||
| type CodeBlockData struct { | ||||
| 	IsFenced    bool   // Specifies whether it's a fenced code block or an indented one | ||||
| 	Info        []byte // This holds the info string | ||||
| 	FenceChar   byte | ||||
| 	FenceLength int | ||||
| 	FenceOffset int | ||||
| } | ||||
|  | ||||
| // TableCellData contains fields relevant to a TableCell node type. | ||||
| type TableCellData struct { | ||||
| 	IsHeader bool           // This tells if it's under the header row | ||||
| 	Align    CellAlignFlags // This holds the value for align attribute | ||||
| } | ||||
|  | ||||
| // HeadingData contains fields relevant to a Heading node type. | ||||
| type HeadingData struct { | ||||
| 	Level        int    // This holds the heading level number | ||||
| 	HeadingID    string // This might hold heading ID, if present | ||||
| 	IsTitleblock bool   // Specifies whether it's a title block | ||||
| } | ||||
|  | ||||
| // Node is a single element in the abstract syntax tree of the parsed document. | ||||
| // It holds connections to the structurally neighboring nodes and, for certain | ||||
| // types of nodes, additional information that might be needed when rendering. | ||||
| type Node struct { | ||||
| 	Type       NodeType // Determines the type of the node | ||||
| 	Parent     *Node    // Points to the parent | ||||
| 	FirstChild *Node    // Points to the first child, if any | ||||
| 	LastChild  *Node    // Points to the last child, if any | ||||
| 	Prev       *Node    // Previous sibling; nil if it's the first child | ||||
| 	Next       *Node    // Next sibling; nil if it's the last child | ||||
|  | ||||
| 	Literal []byte // Text contents of the leaf nodes | ||||
|  | ||||
| 	HeadingData   // Populated if Type is Heading | ||||
| 	ListData      // Populated if Type is List | ||||
| 	CodeBlockData // Populated if Type is CodeBlock | ||||
| 	LinkData      // Populated if Type is Link | ||||
| 	TableCellData // Populated if Type is TableCell | ||||
|  | ||||
| 	content []byte // Markdown content of the block nodes | ||||
| 	open    bool   // Specifies an open block node that has not yet been fully processed | ||||
| } | ||||
|  | ||||
| // NewNode allocates a node of a specified type. | ||||
| func NewNode(typ NodeType) *Node { | ||||
| 	return &Node{ | ||||
| 		Type: typ, | ||||
| 		open: true, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (n *Node) String() string { | ||||
| 	ellipsis := "" | ||||
| 	snippet := n.Literal | ||||
| 	if len(snippet) > 16 { | ||||
| 		snippet = snippet[:16] | ||||
| 		ellipsis = "..." | ||||
| 	} | ||||
| 	return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) | ||||
| } | ||||
|  | ||||
| // Unlink removes node 'n' from the tree. | ||||
| // It panics if the node is nil. | ||||
| func (n *Node) Unlink() { | ||||
| 	if n.Prev != nil { | ||||
| 		n.Prev.Next = n.Next | ||||
| 	} else if n.Parent != nil { | ||||
| 		n.Parent.FirstChild = n.Next | ||||
| 	} | ||||
| 	if n.Next != nil { | ||||
| 		n.Next.Prev = n.Prev | ||||
| 	} else if n.Parent != nil { | ||||
| 		n.Parent.LastChild = n.Prev | ||||
| 	} | ||||
| 	n.Parent = nil | ||||
| 	n.Next = nil | ||||
| 	n.Prev = nil | ||||
| } | ||||
|  | ||||
| // AppendChild adds a node 'child' as a child of 'n'. | ||||
| // It panics if either node is nil. | ||||
| func (n *Node) AppendChild(child *Node) { | ||||
| 	child.Unlink() | ||||
| 	child.Parent = n | ||||
| 	if n.LastChild != nil { | ||||
| 		n.LastChild.Next = child | ||||
| 		child.Prev = n.LastChild | ||||
| 		n.LastChild = child | ||||
| 	} else { | ||||
| 		n.FirstChild = child | ||||
| 		n.LastChild = child | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // InsertBefore inserts 'sibling' immediately before 'n'. | ||||
| // It panics if either node is nil. | ||||
| func (n *Node) InsertBefore(sibling *Node) { | ||||
| 	sibling.Unlink() | ||||
| 	sibling.Prev = n.Prev | ||||
| 	if sibling.Prev != nil { | ||||
| 		sibling.Prev.Next = sibling | ||||
| 	} | ||||
| 	sibling.Next = n | ||||
| 	n.Prev = sibling | ||||
| 	sibling.Parent = n.Parent | ||||
| 	if sibling.Prev == nil { | ||||
| 		sibling.Parent.FirstChild = sibling | ||||
| 	} | ||||
| } | ||||
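These methods also allow editing a parsed tree before rendering it. A sketch that appends an extra paragraph to the document root and then renders the result the same way Run does (the appended text is invented):

package main

import (
	"bytes"
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	doc := md.Parse([]byte("Original content.\n"))

	// Build a Paragraph > Text subtree and hang it off the document root.
	para := blackfriday.NewNode(blackfriday.Paragraph)
	text := blackfriday.NewNode(blackfriday.Text)
	text.Literal = []byte("Appended by post-processing.")
	para.AppendChild(text)
	doc.AppendChild(para)

	// Render the edited tree with the stock HTML renderer, mirroring Run.
	renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Flags: blackfriday.CommonHTMLFlags,
	})
	var buf bytes.Buffer
	renderer.RenderHeader(&buf, doc)
	doc.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		return renderer.RenderNode(&buf, node, entering)
	})
	renderer.RenderFooter(&buf, doc)
	fmt.Print(buf.String())
}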
|  | ||||
| func (n *Node) isContainer() bool { | ||||
| 	switch n.Type { | ||||
| 	case Document: | ||||
| 		fallthrough | ||||
| 	case BlockQuote: | ||||
| 		fallthrough | ||||
| 	case List: | ||||
| 		fallthrough | ||||
| 	case Item: | ||||
| 		fallthrough | ||||
| 	case Paragraph: | ||||
| 		fallthrough | ||||
| 	case Heading: | ||||
| 		fallthrough | ||||
| 	case Emph: | ||||
| 		fallthrough | ||||
| 	case Strong: | ||||
| 		fallthrough | ||||
| 	case Del: | ||||
| 		fallthrough | ||||
| 	case Link: | ||||
| 		fallthrough | ||||
| 	case Image: | ||||
| 		fallthrough | ||||
| 	case Table: | ||||
| 		fallthrough | ||||
| 	case TableHead: | ||||
| 		fallthrough | ||||
| 	case TableBody: | ||||
| 		fallthrough | ||||
| 	case TableRow: | ||||
| 		fallthrough | ||||
| 	case TableCell: | ||||
| 		return true | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (n *Node) canContain(t NodeType) bool { | ||||
| 	if n.Type == List { | ||||
| 		return t == Item | ||||
| 	} | ||||
| 	if n.Type == Document || n.Type == BlockQuote || n.Type == Item { | ||||
| 		return t != Item | ||||
| 	} | ||||
| 	if n.Type == Table { | ||||
| 		return t == TableHead || t == TableBody | ||||
| 	} | ||||
| 	if n.Type == TableHead || n.Type == TableBody { | ||||
| 		return t == TableRow | ||||
| 	} | ||||
| 	if n.Type == TableRow { | ||||
| 		return t == TableCell | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // WalkStatus allows NodeVisitor to have some control over the tree traversal. | ||||
| // It is returned from NodeVisitor and different values allow Node.Walk to | ||||
| // decide which node to go to next. | ||||
| type WalkStatus int | ||||
|  | ||||
| const ( | ||||
| 	// GoToNext is the default traversal of every node. | ||||
| 	GoToNext WalkStatus = iota | ||||
| 	// SkipChildren tells the walker to skip all children of the current node. | ||||
| 	SkipChildren | ||||
| 	// Terminate tells walker to terminate the traversal. | ||||
| 	Terminate | ||||
| ) | ||||
|  | ||||
| // NodeVisitor is a callback to be called when traversing the syntax tree. | ||||
| // Called twice for every node: once with entering=true when the branch is | ||||
| // first visited, then with entering=false after all the children are done. | ||||
| type NodeVisitor func(node *Node, entering bool) WalkStatus | ||||
|  | ||||
| // Walk is a convenience method that instantiates a walker and starts a | ||||
| // traversal of subtree rooted at n. | ||||
| func (n *Node) Walk(visitor NodeVisitor) { | ||||
| 	w := newNodeWalker(n) | ||||
| 	for w.current != nil { | ||||
| 		status := visitor(w.current, w.entering) | ||||
| 		switch status { | ||||
| 		case GoToNext: | ||||
| 			w.next() | ||||
| 		case SkipChildren: | ||||
| 			w.entering = false | ||||
| 			w.next() | ||||
| 		case Terminate: | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
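A sketch of a visitor that uses SkipChildren, collecting link destinations without descending into the link labels (the input is invented):

package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	doc := md.Parse([]byte("[home](https://example.com) and [docs](https://example.com/docs)\n"))

	var destinations []string
	doc.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Link {
			destinations = append(destinations, string(node.LinkData.Destination))
			// The label text under the link is not needed here.
			return blackfriday.SkipChildren
		}
		return blackfriday.GoToNext
	})
	fmt.Println(destinations)
}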
|  | ||||
| type nodeWalker struct { | ||||
| 	current  *Node | ||||
| 	root     *Node | ||||
| 	entering bool | ||||
| } | ||||
|  | ||||
| func newNodeWalker(root *Node) *nodeWalker { | ||||
| 	return &nodeWalker{ | ||||
| 		current:  root, | ||||
| 		root:     root, | ||||
| 		entering: true, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (nw *nodeWalker) next() { | ||||
| 	if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root { | ||||
| 		nw.current = nil | ||||
| 		return | ||||
| 	} | ||||
| 	if nw.entering && nw.current.isContainer() { | ||||
| 		if nw.current.FirstChild != nil { | ||||
| 			nw.current = nw.current.FirstChild | ||||
| 			nw.entering = true | ||||
| 		} else { | ||||
| 			nw.entering = false | ||||
| 		} | ||||
| 	} else if nw.current.Next == nil { | ||||
| 		nw.current = nw.current.Parent | ||||
| 		nw.entering = false | ||||
| 	} else { | ||||
| 		nw.current = nw.current.Next | ||||
| 		nw.entering = true | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func dump(ast *Node) { | ||||
| 	fmt.Println(dumpString(ast)) | ||||
| } | ||||
|  | ||||
| func dumpR(ast *Node, depth int) string { | ||||
| 	if ast == nil { | ||||
| 		return "" | ||||
| 	} | ||||
| 	indent := bytes.Repeat([]byte("\t"), depth) | ||||
| 	content := ast.Literal | ||||
| 	if content == nil { | ||||
| 		content = ast.content | ||||
| 	} | ||||
| 	result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) | ||||
| 	for n := ast.FirstChild; n != nil; n = n.Next { | ||||
| 		result += dumpR(n, depth+1) | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| func dumpString(ast *Node) string { | ||||
| 	return dumpR(ast, 0) | ||||
| } | ||||
| @@ -17,11 +17,14 @@ package blackfriday | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io" | ||||
| ) | ||||
| 
 | ||||
| type smartypantsData struct { | ||||
| // SPRenderer is a struct containing state of a Smartypants renderer. | ||||
| type SPRenderer struct { | ||||
| 	inSingleQuote bool | ||||
| 	inDoubleQuote bool | ||||
| 	callbacks     [256]smartCallback | ||||
| } | ||||
| 
 | ||||
| func wordBoundary(c byte) bool { | ||||
| @@ -118,7 +121,7 @@ func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	if len(text) >= 2 { | ||||
| 		t1 := tolower(text[1]) | ||||
| 
 | ||||
| @@ -127,7 +130,7 @@ func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byt | ||||
| 			if len(text) >= 3 { | ||||
| 				nextChar = text[2] | ||||
| 			} | ||||
| 			if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote, false) { | ||||
| 			if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { | ||||
| 				return 1 | ||||
| 			} | ||||
| 		} | ||||
| @@ -152,7 +155,7 @@ func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byt | ||||
| 	if len(text) > 1 { | ||||
| 		nextChar = text[1] | ||||
| 	} | ||||
| 	if smartQuoteHelper(out, previousChar, nextChar, 's', &smrt.inSingleQuote, false) { | ||||
| 	if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { | ||||
| 		return 0 | ||||
| 	} | ||||
| 
 | ||||
| @@ -160,7 +163,7 @@ func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byt | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartParens(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	if len(text) >= 3 { | ||||
| 		t1 := tolower(text[1]) | ||||
| 		t2 := tolower(text[2]) | ||||
| @@ -185,7 +188,7 @@ func smartParens(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, te | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartDash(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	if len(text) >= 2 { | ||||
| 		if text[1] == '-' { | ||||
| 			out.WriteString("—") | ||||
| @@ -202,7 +205,7 @@ func smartDash(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartDashLatex(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	if len(text) >= 3 && text[1] == '-' && text[2] == '-' { | ||||
| 		out.WriteString("—") | ||||
| 		return 2 | ||||
| @@ -216,13 +219,13 @@ func smartDashLatex(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartAmpVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte, addNBSP bool) int { | ||||
| func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { | ||||
| 	if bytes.HasPrefix(text, []byte(""")) { | ||||
| 		nextChar := byte(0) | ||||
| 		if len(text) >= 7 { | ||||
| 			nextChar = text[6] | ||||
| 		} | ||||
| 		if smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote, addNBSP) { | ||||
| 		if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { | ||||
| 			return 5 | ||||
| 		} | ||||
| 	} | ||||
| @@ -235,18 +238,18 @@ func smartAmpVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartAmp(angledQuotes, addNBSP bool) func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { | ||||
| 	var quote byte = 'd' | ||||
| 	if angledQuotes { | ||||
| 		quote = 'a' | ||||
| 	} | ||||
| 
 | ||||
| 	return func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| 		return smartAmpVariant(out, smrt, previousChar, text, quote, addNBSP) | ||||
| 	return func(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 		return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func smartPeriod(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	if len(text) >= 3 && text[1] == '.' && text[2] == '.' { | ||||
| 		out.WriteString("…") | ||||
| 		return 2 | ||||
| @@ -261,13 +264,13 @@ func smartPeriod(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, te | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartBacktick(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	if len(text) >= 2 && text[1] == '`' { | ||||
| 		nextChar := byte(0) | ||||
| 		if len(text) >= 3 { | ||||
| 			nextChar = text[2] | ||||
| 		} | ||||
| 		if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote, false) { | ||||
| 		if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { | ||||
| 			return 1 | ||||
| 		} | ||||
| 	} | ||||
| @@ -276,7 +279,7 @@ func smartBacktick(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartNumberGeneric(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { | ||||
| 		// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b | ||||
| 		// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) | ||||
| @@ -318,7 +321,7 @@ func smartNumberGeneric(out *bytes.Buffer, smrt *smartypantsData, previousChar b | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartNumber(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { | ||||
| 		if text[0] == '1' && text[1] == '/' && text[2] == '2' { | ||||
| 			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { | ||||
| @@ -346,27 +349,27 @@ func smartNumber(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, te | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartDoubleQuoteVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte) int { | ||||
| func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { | ||||
| 	nextChar := byte(0) | ||||
| 	if len(text) > 1 { | ||||
| 		nextChar = text[1] | ||||
| 	} | ||||
| 	if !smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote, false) { | ||||
| 	if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { | ||||
| 		out.WriteString(""") | ||||
| 	} | ||||
| 
 | ||||
| 	return 0 | ||||
| } | ||||
| 
 | ||||
| func smartDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| 	return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'd') | ||||
| func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') | ||||
| } | ||||
| 
 | ||||
| func smartAngledDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| 	return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'a') | ||||
| func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') | ||||
| } | ||||
| 
 | ||||
| func smartLeftAngle(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int { | ||||
| func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { | ||||
| 	i := 0 | ||||
| 
 | ||||
| 	for i < len(text) && text[i] != '>' { | ||||
| @@ -377,54 +380,78 @@ func smartLeftAngle(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, | ||||
| 	return i | ||||
| } | ||||
| 
 | ||||
| type smartCallback func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int | ||||
| type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int | ||||
| 
 | ||||
| type smartypantsRenderer [256]smartCallback | ||||
| // NewSmartypantsRenderer constructs a Smartypants renderer object. | ||||
| func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { | ||||
| 	var ( | ||||
| 		r SPRenderer | ||||
| 
 | ||||
| var ( | ||||
| 	smartAmpAngled      = smartAmp(true, false) | ||||
| 	smartAmpAngledNBSP  = smartAmp(true, true) | ||||
| 	smartAmpRegular     = smartAmp(false, false) | ||||
| 	smartAmpRegularNBSP = smartAmp(false, true) | ||||
| ) | ||||
| 		smartAmpAngled      = r.smartAmp(true, false) | ||||
| 		smartAmpAngledNBSP  = r.smartAmp(true, true) | ||||
| 		smartAmpRegular     = r.smartAmp(false, false) | ||||
| 		smartAmpRegularNBSP = r.smartAmp(false, true) | ||||
| 
 | ||||
| func smartypants(flags int) *smartypantsRenderer { | ||||
| 	r := new(smartypantsRenderer) | ||||
| 	addNBSP := flags&HTML_SMARTYPANTS_QUOTES_NBSP != 0 | ||||
| 	if flags&HTML_SMARTYPANTS_ANGLED_QUOTES == 0 { | ||||
| 		r['"'] = smartDoubleQuote | ||||
| 		addNBSP = flags&SmartypantsQuotesNBSP != 0 | ||||
| 	) | ||||
| 
 | ||||
| 	if flags&SmartypantsAngledQuotes == 0 { | ||||
| 		r.callbacks['"'] = r.smartDoubleQuote | ||||
| 		if !addNBSP { | ||||
| 			r['&'] = smartAmpRegular | ||||
| 			r.callbacks['&'] = smartAmpRegular | ||||
| 		} else { | ||||
| 			r['&'] = smartAmpRegularNBSP | ||||
| 			r.callbacks['&'] = smartAmpRegularNBSP | ||||
| 		} | ||||
| 	} else { | ||||
| 		r['"'] = smartAngledDoubleQuote | ||||
| 		r.callbacks['"'] = r.smartAngledDoubleQuote | ||||
| 		if !addNBSP { | ||||
| 			r['&'] = smartAmpAngled | ||||
| 			r.callbacks['&'] = smartAmpAngled | ||||
| 		} else { | ||||
| 			r['&'] = smartAmpAngledNBSP | ||||
| 			r.callbacks['&'] = smartAmpAngledNBSP | ||||
| 		} | ||||
| 	} | ||||
| 	r['\''] = smartSingleQuote | ||||
| 	r['('] = smartParens | ||||
| 	if flags&HTML_SMARTYPANTS_DASHES != 0 { | ||||
| 		if flags&HTML_SMARTYPANTS_LATEX_DASHES == 0 { | ||||
| 			r['-'] = smartDash | ||||
| 	r.callbacks['\''] = r.smartSingleQuote | ||||
| 	r.callbacks['('] = r.smartParens | ||||
| 	if flags&SmartypantsDashes != 0 { | ||||
| 		if flags&SmartypantsLatexDashes == 0 { | ||||
| 			r.callbacks['-'] = r.smartDash | ||||
| 		} else { | ||||
| 			r['-'] = smartDashLatex | ||||
| 			r.callbacks['-'] = r.smartDashLatex | ||||
| 		} | ||||
| 	} | ||||
| 	r['.'] = smartPeriod | ||||
| 	if flags&HTML_SMARTYPANTS_FRACTIONS == 0 { | ||||
| 		r['1'] = smartNumber | ||||
| 		r['3'] = smartNumber | ||||
| 	r.callbacks['.'] = r.smartPeriod | ||||
| 	if flags&SmartypantsFractions == 0 { | ||||
| 		r.callbacks['1'] = r.smartNumber | ||||
| 		r.callbacks['3'] = r.smartNumber | ||||
| 	} else { | ||||
| 		for ch := '1'; ch <= '9'; ch++ { | ||||
| 			r[ch] = smartNumberGeneric | ||||
| 			r.callbacks[ch] = r.smartNumberGeneric | ||||
| 		} | ||||
| 	} | ||||
| 	r['<'] = smartLeftAngle | ||||
| 	r['`'] = smartBacktick | ||||
| 	return r | ||||
| 	r.callbacks['<'] = r.smartLeftAngle | ||||
| 	r.callbacks['`'] = r.smartBacktick | ||||
| 	return &r | ||||
| } | ||||
| 
 | ||||
| // Process is the entry point of the Smartypants renderer. | ||||
| func (r *SPRenderer) Process(w io.Writer, text []byte) { | ||||
| 	mark := 0 | ||||
| 	for i := 0; i < len(text); i++ { | ||||
| 		if action := r.callbacks[text[i]]; action != nil { | ||||
| 			if i > mark { | ||||
| 				w.Write(text[mark:i]) | ||||
| 			} | ||||
| 			previousChar := byte(0) | ||||
| 			if i > 0 { | ||||
| 				previousChar = text[i-1] | ||||
| 			} | ||||
| 			var tmp bytes.Buffer | ||||
| 			i += action(&tmp, previousChar, text[i:]) | ||||
| 			w.Write(tmp.Bytes()) | ||||
| 			mark = i + 1 | ||||
| 		} | ||||
| 	} | ||||
| 	if mark < len(text) { | ||||
| 		w.Write(text[mark:]) | ||||
| 	} | ||||
| } | ||||
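For context on the smartypants.go hunks above: NewSmartypantsRenderer and Process are exported by blackfriday v2 (both appear in this diff), so the Smartypants pass can be driven on its own. A minimal sketch, assuming only the flag constants that appear in the hunk (SmartypantsDashes, SmartypantsFractions); in normal use the HTML renderer is expected to invoke this internally when the Smartypants flag is enabled:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Build a standalone Smartypants pass; the callback table set up in
	// NewSmartypantsRenderer decides which input bytes trigger substitutions.
	sp := blackfriday.NewSmartypantsRenderer(
		blackfriday.SmartypantsDashes | blackfriday.SmartypantsFractions,
	)

	var out bytes.Buffer
	// Process copies the text through, replacing quotes, dashes, ellipses
	// and simple fractions with their HTML-entity forms.
	sp.Process(&out, []byte(`"Smart" quotes -- 1/2 done...`))
	fmt.Println(out.String())
}
```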
							
								
								
									
vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml (6 changed lines, generated, vendored)
							| @@ -1,11 +1,11 @@ | ||||
| sudo: false | ||||
| language: go | ||||
| go: | ||||
|   - 1.7 | ||||
|   - tip | ||||
|   - 1.x | ||||
|   - master | ||||
| matrix: | ||||
|   allow_failures: | ||||
|     - go: tip | ||||
|     - go: master | ||||
|   fast_finish: true | ||||
| install: | ||||
|   - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). | ||||
|   | ||||
							
								
								
									
vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE (10 changed lines, generated, vendored)
							| @@ -1,3 +1,5 @@ | ||||
| MIT License | ||||
|  | ||||
| Copyright (c) 2015 Dmitri Shuralyov | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
| @@ -7,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
| copies of the Software, and to permit persons to whom the Software is | ||||
| furnished to do so, subject to the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be included in | ||||
| all copies or substantial portions of the Software. | ||||
| The above copyright notice and this permission notice shall be included in all | ||||
| copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||||
| THE SOFTWARE. | ||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
| SOFTWARE. | ||||
|   | ||||
							
								
								
									
vendor/github.com/shurcooL/sanitized_anchor_name/README.md (6 changed lines, generated, vendored)
							| @@ -5,9 +5,11 @@ sanitized_anchor_name | ||||
|  | ||||
| Package sanitized_anchor_name provides a func to create sanitized anchor names. | ||||
|  | ||||
| Its logic can be reused by multiple packages to create interoperable anchor names and links to those anchors. | ||||
| Its logic can be reused by multiple packages to create interoperable anchor names | ||||
| and links to those anchors. | ||||
|  | ||||
| At this time, it does not try to ensure that generated anchor names are unique, that responsibility falls on the caller. | ||||
| At this time, it does not try to ensure that generated anchor names | ||||
| are unique, that responsibility falls on the caller. | ||||
|  | ||||
| Installation | ||||
| ------------ | ||||
|   | ||||
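The README's description maps onto a single exported function, Create, shown in the main.go hunk further below. A minimal usage sketch; the expected output in the comments is my reading of Create's logic, not quoted from the upstream docs:

```go
package main

import (
	"fmt"

	"github.com/shurcooL/sanitized_anchor_name"
)

func main() {
	// Letters and digits are lowercased and kept; runs of anything else
	// collapse into a single dash. Likely prints "this-is-a-header".
	fmt.Println(sanitized_anchor_name.Create("This is a header!"))

	// As the README notes, uniqueness is the caller's job: identical input
	// yields an identical anchor name every time.
	fmt.Println(sanitized_anchor_name.Create("This is a header!"))
}
```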
							
								
								
									
vendor/github.com/shurcooL/sanitized_anchor_name/go.mod (1 changed line, generated, vendored, new file)
							| @@ -0,0 +1 @@ | ||||
| module github.com/shurcooL/sanitized_anchor_name | ||||
							
								
								
									
vendor/github.com/shurcooL/sanitized_anchor_name/main.go (2 changed lines, generated, vendored)
							| @@ -13,7 +13,7 @@ import "unicode" | ||||
| func Create(text string) string { | ||||
| 	var anchorName []rune | ||||
| 	var futureDash = false | ||||
| 	for _, r := range []rune(text) { | ||||
| 	for _, r := range text { | ||||
| 		switch { | ||||
| 		case unicode.IsLetter(r) || unicode.IsNumber(r): | ||||
| 			if futureDash && len(anchorName) > 0 { | ||||
|   | ||||
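The only functional change in this hunk relies on a language guarantee: ranging over a string already decodes UTF-8 and yields runes, so the intermediate []rune conversion was an unnecessary allocation. A quick illustration of that behavior, independent of this package:

```go
package main

import "fmt"

func main() {
	// Each iteration yields the starting byte offset and the decoded rune;
	// Create only uses the rune, so the offset is discarded with _ there.
	for i, r := range "héllo" {
		fmt.Printf("byte %d: %q\n", i, r)
	}
}
```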
							
								
								
									
vendor/golang.org/x/net/html/token.go (6 changed lines, generated, vendored)
							| @@ -347,6 +347,7 @@ loop: | ||||
| 			break loop | ||||
| 		} | ||||
| 		if c != '/' { | ||||
| 			z.raw.end-- | ||||
| 			continue loop | ||||
| 		} | ||||
| 		if z.readRawEndTag() || z.err != nil { | ||||
| @@ -1067,6 +1068,11 @@ loop: | ||||
|  | ||||
| // Raw returns the unmodified text of the current token. Calling Next, Token, | ||||
| // Text, TagName or TagAttr may change the contents of the returned slice. | ||||
| // | ||||
| // The token stream's raw bytes partition the byte stream (up until an | ||||
| // ErrorToken). There are no overlaps or gaps between two consecutive token's | ||||
| // raw bytes. One implication is that the byte offset of the current token is | ||||
| // the sum of the lengths of all previous tokens' raw bytes. | ||||
| func (z *Tokenizer) Raw() []byte { | ||||
| 	return z.buf[z.raw.start:z.raw.end] | ||||
| } | ||||
|   | ||||
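The new doc comment above guarantees that consecutive Raw() slices partition the input stream, which makes byte offsets easy to track. A minimal sketch against the exported x/net/html tokenizer API; the HTML input is just an example:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<p>Hello, <b>world</b>!</p>`))

	// Because raw token bytes have no gaps or overlaps, summing their
	// lengths gives each token's starting offset in the original input.
	offset := 0
	for {
		if z.Next() == html.ErrorToken {
			break
		}
		raw := z.Raw()
		fmt.Printf("offset %2d: %q\n", offset, raw)
		offset += len(raw)
	}
}
```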
							
								
								
									
vendor/modules.txt (12 changed lines, vendored)
							| @@ -95,8 +95,6 @@ github.com/boombuler/barcode/qr | ||||
| github.com/boombuler/barcode/utils | ||||
| # github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 | ||||
| github.com/bradfitz/gomemcache/memcache | ||||
| # github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f | ||||
| github.com/chaseadamsio/goorgeous | ||||
| # github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d | ||||
| github.com/couchbase/gomemcached | ||||
| github.com/couchbase/gomemcached/client | ||||
| @@ -332,6 +330,8 @@ github.com/mschoch/smat | ||||
| github.com/msteinert/pam | ||||
| # github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 | ||||
| github.com/nfnt/resize | ||||
| # github.com/niklasfasching/go-org v0.1.7 | ||||
| github.com/niklasfasching/go-org/org | ||||
| # github.com/oliamb/cutter v0.2.2 | ||||
| github.com/oliamb/cutter | ||||
| # github.com/pelletier/go-toml v1.4.0 | ||||
| @@ -360,15 +360,15 @@ github.com/prometheus/common/model | ||||
| github.com/prometheus/procfs | ||||
| github.com/prometheus/procfs/internal/fs | ||||
| github.com/prometheus/procfs/internal/util | ||||
| # github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff | ||||
| github.com/russross/blackfriday | ||||
| # github.com/russross/blackfriday/v2 v2.0.1 | ||||
| github.com/russross/blackfriday/v2 | ||||
| # github.com/satori/go.uuid v1.2.0 | ||||
| github.com/satori/go.uuid | ||||
| # github.com/sergi/go-diff v1.0.0 | ||||
| github.com/sergi/go-diff/diffmatchpatch | ||||
| # github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b | ||||
| github.com/shurcooL/httpfs/vfsutil | ||||
| # github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc | ||||
| # github.com/shurcooL/sanitized_anchor_name v1.0.0 | ||||
| github.com/shurcooL/sanitized_anchor_name | ||||
| # github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd | ||||
| github.com/shurcooL/vfsgen | ||||
| @@ -464,7 +464,7 @@ golang.org/x/crypto/scrypt | ||||
| golang.org/x/crypto/ssh | ||||
| golang.org/x/crypto/ssh/agent | ||||
| golang.org/x/crypto/ssh/knownhosts | ||||
| # golang.org/x/net v0.0.0-20190909003024-a7b16738d86b | ||||
| # golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 | ||||
| golang.org/x/net/context | ||||
| golang.org/x/net/context/ctxhttp | ||||
| golang.org/x/net/html | ||||
|   | ||||
Lauris BH