diff --git a/main.go b/main.go
index 383dbc2093..c2acda99af 100644
--- a/main.go
+++ b/main.go
@@ -13,6 +13,10 @@ import (
"code.gitea.io/gitea/cmd"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
+ // register supported doc types
+ _ "code.gitea.io/gitea/modules/markup/markdown"
+ _ "code.gitea.io/gitea/modules/markup/orgmode"
+
"github.com/urfave/cli"
)
diff --git a/models/mail.go b/models/mail.go
index afcddb6d23..98766f69f2 100644
--- a/models/mail.go
+++ b/models/mail.go
@@ -13,8 +13,8 @@ import (
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/mailer"
- "code.gitea.io/gitea/modules/markdown"
"code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"gopkg.in/gomail.v2"
"gopkg.in/macaron.v1"
diff --git a/modules/markup/html_test.go b/modules/markup/html_test.go
index 407115526d..ab2ca5ef47 100644
--- a/modules/markup/html_test.go
+++ b/modules/markup/html_test.go
@@ -10,8 +10,8 @@ import (
"strings"
"testing"
- _ "code.gitea.io/gitea/modules/markdown"
. "code.gitea.io/gitea/modules/markup"
+ _ "code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"github.com/stretchr/testify/assert"
diff --git a/modules/markdown/markdown.go b/modules/markup/markdown/markdown.go
similarity index 95%
rename from modules/markdown/markdown.go
rename to modules/markup/markdown/markdown.go
index 6cf2d9eaa1..f0ed0e03ab 100644
--- a/modules/markdown/markdown.go
+++ b/modules/markup/markdown/markdown.go
@@ -17,8 +17,8 @@ import (
// Renderer is a extended version of underlying render object.
type Renderer struct {
blackfriday.Renderer
- urlPrefix string
- isWikiMarkdown bool
+ URLPrefix string
+ IsWiki bool
}
// Link defines how formal links should be processed to produce corresponding HTML elements.
@@ -26,10 +26,10 @@ func (r *Renderer) Link(out *bytes.Buffer, link []byte, title []byte, content []
if len(link) > 0 && !markup.IsLink(link) {
if link[0] != '#' {
lnk := string(link)
- if r.isWikiMarkdown {
+ if r.IsWiki {
lnk = markup.URLJoin("wiki", lnk)
}
- mLink := markup.URLJoin(r.urlPrefix, lnk)
+ mLink := markup.URLJoin(r.URLPrefix, lnk)
link = []byte(mLink)
}
}
@@ -95,8 +95,8 @@ var (
// Image defines how images should be processed to produce corresponding HTML elements.
func (r *Renderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
- prefix := r.urlPrefix
- if r.isWikiMarkdown {
+ prefix := r.URLPrefix
+ if r.IsWiki {
prefix = markup.URLJoin(prefix, "wiki", "src")
}
prefix = strings.Replace(prefix, "/src/", "/raw/", 1)
@@ -129,9 +129,9 @@ func RenderRaw(body []byte, urlPrefix string, wikiMarkdown bool) []byte {
htmlFlags |= blackfriday.HTML_SKIP_STYLE
htmlFlags |= blackfriday.HTML_OMIT_CONTENTS
renderer := &Renderer{
- Renderer: blackfriday.HtmlRenderer(htmlFlags, "", ""),
- urlPrefix: urlPrefix,
- isWikiMarkdown: wikiMarkdown,
+ Renderer: blackfriday.HtmlRenderer(htmlFlags, "", ""),
+ URLPrefix: urlPrefix,
+ IsWiki: wikiMarkdown,
}
// set up the parser
diff --git a/modules/markdown/markdown_test.go b/modules/markup/markdown/markdown_test.go
similarity index 89%
rename from modules/markdown/markdown_test.go
rename to modules/markup/markdown/markdown_test.go
index 1b57e4f203..9ca3de01ca 100644
--- a/modules/markdown/markdown_test.go
+++ b/modules/markup/markdown/markdown_test.go
@@ -5,13 +5,11 @@
package markdown_test
import (
- "fmt"
- "strconv"
"strings"
"testing"
- . "code.gitea.io/gitea/modules/markdown"
"code.gitea.io/gitea/modules/markup"
+ . "code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"github.com/stretchr/testify/assert"
@@ -21,45 +19,6 @@ const AppURL = "http://localhost:3000/"
const Repo = "gogits/gogs"
const AppSubURL = AppURL + Repo + "/"
-var numericMetas = map[string]string{
- "format": "https://someurl.com/{user}/{repo}/{index}",
- "user": "someUser",
- "repo": "someRepo",
- "style": markup.IssueNameStyleNumeric,
-}
-
-var alphanumericMetas = map[string]string{
- "format": "https://someurl.com/{user}/{repo}/{index}",
- "user": "someUser",
- "repo": "someRepo",
- "style": markup.IssueNameStyleAlphanumeric,
-}
-
-// numericLink an HTML to a numeric-style issue
-func numericIssueLink(baseURL string, index int) string {
- return link(markup.URLJoin(baseURL, strconv.Itoa(index)), fmt.Sprintf("#%d", index))
-}
-
-// alphanumLink an HTML link to an alphanumeric-style issue
-func alphanumIssueLink(baseURL string, name string) string {
- return link(markup.URLJoin(baseURL, name), name)
-}
-
-// urlContentsLink an HTML link whose contents is the target URL
-func urlContentsLink(href string) string {
- return link(href, href)
-}
-
-// link an HTML link
-func link(href, contents string) string {
- return fmt.Sprintf("<a href=\"%s\">%s</a>", href, contents)
-}
-
-func testRenderIssueIndexPattern(t *testing.T, input, expected string, metas map[string]string) {
- assert.Equal(t, expected,
- string(markup.RenderIssueIndexPattern([]byte(input), AppSubURL, metas)))
-}
-
func TestRender_StandardLinks(t *testing.T) {
setting.AppURL = AppURL
setting.AppSubURL = AppSubURL
diff --git a/modules/markup/markup_test.go b/modules/markup/markup_test.go
index 8d061ae39e..b0ebfae57d 100644
--- a/modules/markup/markup_test.go
+++ b/modules/markup/markup_test.go
@@ -7,8 +7,8 @@ package markup_test
import (
"testing"
- _ "code.gitea.io/gitea/modules/markdown"
. "code.gitea.io/gitea/modules/markup"
+ _ "code.gitea.io/gitea/modules/markup/markdown"
"github.com/stretchr/testify/assert"
)
diff --git a/modules/markup/orgmode/orgmode.go b/modules/markup/orgmode/orgmode.go
new file mode 100644
index 0000000000..f9223a18b5
--- /dev/null
+++ b/modules/markup/orgmode/orgmode.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package markup
+
+import (
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/markup/markdown"
+
+ "github.com/chaseadamsio/goorgeous"
+ "github.com/russross/blackfriday"
+)
+
+func init() {
+ markup.RegisterParser(Parser{})
+}
+
+// Parser implements markup.Parser for orgmode
+type Parser struct {
+}
+
+// Name implements markup.Parser
+func (Parser) Name() string {
+ return "orgmode"
+}
+
+// Extensions implements markup.Parser
+func (Parser) Extensions() []string {
+ return []string{".org"}
+}
+
+// Render renders orgmode rawbytes to HTML
+func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte {
+ htmlFlags := blackfriday.HTML_USE_XHTML
+ htmlFlags |= blackfriday.HTML_SKIP_STYLE
+ htmlFlags |= blackfriday.HTML_OMIT_CONTENTS
+ renderer := &markdown.Renderer{
+ Renderer: blackfriday.HtmlRenderer(htmlFlags, "", ""),
+ URLPrefix: urlPrefix,
+ IsWiki: isWiki,
+ }
+
+ result := goorgeous.Org(rawBytes, renderer)
+ return result
+}
+
+// RenderString renders orgmode string to HTML string
+func RenderString(rawContent string, urlPrefix string, metas map[string]string, isWiki bool) string {
+ return string(Render([]byte(rawContent), urlPrefix, metas, isWiki))
+}
+
+// Render implements markup.Parser
+func (Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte {
+ return Render(rawBytes, urlPrefix, metas, isWiki)
+}
diff --git a/modules/markup/orgmode/orgmode_test.go b/modules/markup/orgmode/orgmode_test.go
new file mode 100644
index 0000000000..a68ab5d3af
--- /dev/null
+++ b/modules/markup/orgmode/orgmode_test.go
@@ -0,0 +1,54 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package markup
+
+import (
+ "strings"
+ "testing"
+
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+)
+
+const AppURL = "http://localhost:3000/"
+const Repo = "gogits/gogs"
+const AppSubURL = AppURL + Repo + "/"
+
+func TestRender_StandardLinks(t *testing.T) {
+ setting.AppURL = AppURL
+ setting.AppSubURL = AppSubURL
+
+ test := func(input, expected string) {
+ buffer := RenderString(input, setting.AppSubURL, nil, false)
+ assert.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(buffer))
+ }
+
+ googleRendered := `<p><a href="https://google.com/" title="https://google.com/">https://google.com/</a></p>`
+ test("[[https://google.com/]]", googleRendered)
+
+ lnk := markup.URLJoin(AppSubURL, "WikiPage")
+ test("[[WikiPage][WikiPage]]",
+ `<p><a href="`+lnk+`" title="WikiPage">WikiPage</a></p>`)
+}
+
+func TestRender_Images(t *testing.T) {
+ setting.AppURL = AppURL
+ setting.AppSubURL = AppSubURL
+
+ test := func(input, expected string) {
+ buffer := RenderString(input, setting.AppSubURL, nil, false)
+ assert.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(buffer))
+ }
+
+ url := "../../.images/src/02/train.jpg"
+ title := "Train"
+ result := markup.URLJoin(AppSubURL, url)
+
+ test(
+ "[[file:"+url+"]["+title+"]]",
+ `
`)
+}
diff --git a/routers/api/v1/misc/markdown.go b/routers/api/v1/misc/markdown.go
index a2e65ecb0a..8e3c66841f 100644
--- a/routers/api/v1/misc/markdown.go
+++ b/routers/api/v1/misc/markdown.go
@@ -8,8 +8,8 @@ import (
api "code.gitea.io/sdk/gitea"
"code.gitea.io/gitea/modules/context"
- "code.gitea.io/gitea/modules/markdown"
"code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
)
diff --git a/routers/repo/issue.go b/routers/repo/issue.go
index 4c4f9037bf..091268116b 100644
--- a/routers/repo/issue.go
+++ b/routers/repo/issue.go
@@ -24,7 +24,7 @@ import (
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/indexer"
"code.gitea.io/gitea/modules/log"
- "code.gitea.io/gitea/modules/markdown"
+ "code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/notification"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
diff --git a/routers/repo/release.go b/routers/repo/release.go
index fe68f1b6f1..da99dd7713 100644
--- a/routers/repo/release.go
+++ b/routers/repo/release.go
@@ -12,7 +12,7 @@ import (
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
- "code.gitea.io/gitea/modules/markdown"
+ "code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"github.com/Unknwon/paginater"
diff --git a/routers/repo/view.go b/routers/repo/view.go
index d794a57405..bfba7acac8 100644
--- a/routers/repo/view.go
+++ b/routers/repo/view.go
@@ -95,11 +95,11 @@ func renderDirectory(ctx *context.Context, treeLink string) {
buf = append(buf, d...)
newbuf := markup.Render(readmeFile.Name(), buf, treeLink, ctx.Repo.Repository.ComposeMetas())
if newbuf != nil {
- ctx.Data["IsMarkdown"] = true
+ ctx.Data["IsMarkup"] = true
} else {
// FIXME This is the only way to show non-markdown files
// instead of a broken "View Raw" link
- ctx.Data["IsMarkdown"] = true
+ ctx.Data["IsMarkup"] = false
newbuf = bytes.Replace(buf, []byte("\n"), []byte(`<br>`), -1)
}
ctx.Data["FileContent"] = string(newbuf)
@@ -197,10 +197,8 @@ func renderFile(ctx *context.Context, entry *git.TreeEntry, treeLink, rawLink st
tp := markup.Type(blob.Name())
isSupportedMarkup := tp != ""
- // FIXME: currently set IsMarkdown for compatible
- ctx.Data["IsMarkdown"] = isSupportedMarkup
-
- readmeExist := isSupportedMarkup || markup.IsReadmeFile(blob.Name())
+ ctx.Data["IsMarkup"] = isSupportedMarkup
+ readmeExist := markup.IsReadmeFile(blob.Name())
ctx.Data["ReadmeExist"] = readmeExist
if readmeExist && isSupportedMarkup {
ctx.Data["FileContent"] = string(markup.Render(blob.Name(), buf, path.Dir(treeLink), ctx.Repo.Repository.ComposeMetas()))
diff --git a/routers/repo/wiki.go b/routers/repo/wiki.go
index 2a73fdc41e..019c3d5d16 100644
--- a/routers/repo/wiki.go
+++ b/routers/repo/wiki.go
@@ -18,8 +18,8 @@ import (
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
- "code.gitea.io/gitea/modules/markdown"
"code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/markup/markdown"
)
const (
diff --git a/templates/repo/view_file.tmpl b/templates/repo/view_file.tmpl
index 36fccb00b3..898b9b5557 100644
--- a/templates/repo/view_file.tmpl
+++ b/templates/repo/view_file.tmpl
@@ -36,8 +36,8 @@
{{end}}
-
- {{if .IsMarkdown}}
+
+ {{if .IsMarkup}}
{{if .FileContent}}{{.FileContent | Str2html}}{{end}}
{{else if not .IsTextFile}}
diff --git a/vendor/github.com/chaseadamsio/goorgeous/LICENSE b/vendor/github.com/chaseadamsio/goorgeous/LICENSE
new file mode 100644
index 0000000000..d7a37c6a3b
--- /dev/null
+++ b/vendor/github.com/chaseadamsio/goorgeous/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Chase Adams
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/chaseadamsio/goorgeous/README.org b/vendor/github.com/chaseadamsio/goorgeous/README.org
new file mode 100644
index 0000000000..37e0f2ec73
--- /dev/null
+++ b/vendor/github.com/chaseadamsio/goorgeous/README.org
@@ -0,0 +1,66 @@
+#+TITLE: chaseadamsio/goorgeous
+
+[[https://travis-ci.org/chaseadamsio/goorgeous.svg?branch=master]]
+[[https://coveralls.io/repos/github/chaseadamsio/goorgeous/badge.svg?branch=master]]
+
+/goorgeous is a Go Org to HTML Parser./
+
+[[file:gopher_small.gif]]
+
+*Pronounced: Go? Org? Yes!*
+
+#+BEGIN_QUOTE
+"Org mode is for keeping notes, maintaining TODO lists, planning projects, and authoring documents with a fast and effective plain-text system."
+
+- [[orgmode.org]]
+#+END_QUOTE
+
+The purpose of this package is to come as close as possible as parsing an =*.org= document into HTML, the same way one might publish [[http://orgmode.org/worg/org-tutorials/org-publish-html-tutorial.html][with org-publish-html from Emacs]].
+
+* Installation
+
+#+BEGIN_SRC sh
+ go get -u github.com/chaseadamsio/goorgeous
+#+END_SRC
+
+* Usage
+
+** Org Headers
+
+To retrieve the headers from a =[]byte=, call =OrgHeaders= and it will return a =map[string]interface{}=:
+
+#+BEGIN_SRC go
+ input := "#+title: goorgeous\n* Some Headline\n"
+ out := goorgeous.OrgHeaders(input)
+#+END_SRC
+
+#+BEGIN_SRC go
+ map[string]interface{}{
+ "title": "goorgeous"
+ }
+#+END_SRC
+
+** Org Content
+
+After importing =github.com/chaseadamsio/goorgeous=, you can call =Org= with a =[]byte= and it will return an =html= version of the content as a =[]byte=
+
+#+BEGIN_SRC go
+ input := "#+TITLE: goorgeous\n* Some Headline\n"
+ out := goorgeous.Org(input)
+#+END_SRC
+
+=out= will be:
+
+#+BEGIN_SRC html
+ Some Headline
/n
+#+END_SRC
+
+* Why?
+
+First off, I've become an unapologetic user of Emacs & ever since finding =org-mode= I use it for anything having to do with writing content, organizing my life and keeping documentation of my days/weeks/months.
+
+Although I like Emacs & =emacs-lisp=, I publish all of my html sites with [[https://gohugo.io][Hugo Static Site Generator]] and wanted to be able to write my content in =org-mode= in Emacs rather than markdown.
+
+Hugo's implementation of templating and speed are unmatched, so the only way I knew for sure I could continue to use Hugo and write in =org-mode= seamlessly was to write a golang parser for org content and submit a PR for Hugo to use it.
+* Acknowledgements
+I leaned heavily on russross' [[https://github.com/russross/blackfriday][blackfriday markdown renderer]] as both an example of how to write a parser (with some updates to leverage the go we know today) and reusing the blackfriday HTML Renderer so I didn't have to write my own!
diff --git a/vendor/github.com/chaseadamsio/goorgeous/goorgeous.go b/vendor/github.com/chaseadamsio/goorgeous/goorgeous.go
new file mode 100644
index 0000000000..f1b2671d65
--- /dev/null
+++ b/vendor/github.com/chaseadamsio/goorgeous/goorgeous.go
@@ -0,0 +1,803 @@
+package goorgeous
+
+import (
+ "bufio"
+ "bytes"
+ "regexp"
+
+ "github.com/russross/blackfriday"
+ "github.com/shurcooL/sanitized_anchor_name"
+)
+
+type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int
+
+type footnotes struct {
+ id string
+ def string
+}
+
+type parser struct {
+ r blackfriday.Renderer
+ inlineCallback [256]inlineParser
+ notes []footnotes
+}
+
+// NewParser returns a new parser with the inlineCallbacks required for org content
+func NewParser(renderer blackfriday.Renderer) *parser {
+ p := new(parser)
+ p.r = renderer
+
+ p.inlineCallback['='] = generateVerbatim
+ p.inlineCallback['~'] = generateCode
+ p.inlineCallback['/'] = generateEmphasis
+ p.inlineCallback['_'] = generateUnderline
+ p.inlineCallback['*'] = generateBold
+ p.inlineCallback['+'] = generateStrikethrough
+ p.inlineCallback['['] = generateLinkOrImg
+
+ return p
+}
+
+// OrgCommon is the easiest way to parse a byte slice of org content and makes assumptions
+// that the caller wants to use blackfriday's HTMLRenderer with XHTML
+func OrgCommon(input []byte) []byte {
+ renderer := blackfriday.HtmlRenderer(blackfriday.HTML_USE_XHTML, "", "")
+ return OrgOptions(input, renderer)
+}
+
+// Org is a convenience name for OrgOptions
+func Org(input []byte, renderer blackfriday.Renderer) []byte {
+ return OrgOptions(input, renderer)
+}
+
+// OrgOptions takes an org content byte slice and a renderer to use
+func OrgOptions(input []byte, renderer blackfriday.Renderer) []byte {
+ // in the case that we need to render something in isEmpty but there isn't a new line char
+ input = append(input, '\n')
+ var output bytes.Buffer
+
+ p := NewParser(renderer)
+
+ scanner := bufio.NewScanner(bytes.NewReader(input))
+ // used to capture code blocks
+ marker := ""
+ syntax := ""
+ listType := ""
+ inParagraph := false
+ inList := false
+ inTable := false
+ inFixedWidthArea := false
+ var tmpBlock bytes.Buffer
+
+ for scanner.Scan() {
+ data := scanner.Bytes()
+
+ if !isEmpty(data) && isComment(data) || IsKeyword(data) {
+ switch {
+ case inList:
+ if tmpBlock.Len() > 0 {
+ p.generateList(&output, tmpBlock.Bytes(), listType)
+ }
+ inList = false
+ listType = ""
+ tmpBlock.Reset()
+ case inTable:
+ if tmpBlock.Len() > 0 {
+ p.generateTable(&output, tmpBlock.Bytes())
+ }
+ inTable = false
+ tmpBlock.Reset()
+ case inParagraph:
+ if tmpBlock.Len() > 0 {
+ p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1])
+ }
+ inParagraph = false
+ tmpBlock.Reset()
+ case inFixedWidthArea:
+ if tmpBlock.Len() > 0 {
+ tmpBlock.WriteString("</pre>\n")
+ output.Write(tmpBlock.Bytes())
+ }
+ inFixedWidthArea = false
+ tmpBlock.Reset()
+ }
+
+ }
+
+ switch {
+ case isEmpty(data):
+ switch {
+ case inList:
+ if tmpBlock.Len() > 0 {
+ p.generateList(&output, tmpBlock.Bytes(), listType)
+ }
+ inList = false
+ listType = ""
+ tmpBlock.Reset()
+ case inTable:
+ if tmpBlock.Len() > 0 {
+ p.generateTable(&output, tmpBlock.Bytes())
+ }
+ inTable = false
+ tmpBlock.Reset()
+ case inParagraph:
+ if tmpBlock.Len() > 0 {
+ p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1])
+ }
+ inParagraph = false
+ tmpBlock.Reset()
+ case inFixedWidthArea:
+ if tmpBlock.Len() > 0 {
+ tmpBlock.WriteString("</pre>\n")
+ output.Write(tmpBlock.Bytes())
+ }
+ inFixedWidthArea = false
+ tmpBlock.Reset()
+ case marker != "":
+ tmpBlock.WriteByte('\n')
+ default:
+ continue
+ }
+ case isPropertyDrawer(data) || marker == "PROPERTIES":
+ if marker == "" {
+ marker = "PROPERTIES"
+ }
+ if bytes.Equal(data, []byte(":END:")) {
+ marker = ""
+ }
+ continue
+ case isBlock(data) || marker != "":
+ matches := reBlock.FindSubmatch(data)
+ if len(matches) > 0 {
+ if string(matches[1]) == "END" {
+ switch marker {
+ case "QUOTE":
+ var tmpBuf bytes.Buffer
+ p.inline(&tmpBuf, tmpBlock.Bytes())
+ p.r.BlockQuote(&output, tmpBuf.Bytes())
+ case "CENTER":
+ var tmpBuf bytes.Buffer
+ output.WriteString("<center>\n")
+ p.inline(&tmpBuf, tmpBlock.Bytes())
+ output.Write(tmpBuf.Bytes())
+ output.WriteString("</center>\n")
+ default:
+ tmpBlock.WriteByte('\n')
+ p.r.BlockCode(&output, tmpBlock.Bytes(), syntax)
+ }
+ marker = ""
+ tmpBlock.Reset()
+ continue
+ }
+
+ }
+ if marker != "" {
+ if marker != "SRC" && marker != "EXAMPLE" {
+ var tmpBuf bytes.Buffer
+ tmpBuf.Write([]byte("<p>\n"))
+ p.inline(&tmpBuf, data)
+ tmpBuf.WriteByte('\n')
+ tmpBuf.Write([]byte("</p>\n"))
+ tmpBlock.Write(tmpBuf.Bytes())
+
+ } else {
+ tmpBlock.WriteByte('\n')
+ tmpBlock.Write(data)
+ }
+
+ } else {
+ marker = string(matches[2])
+ syntax = string(matches[3])
+ }
+ case isFootnoteDef(data):
+ matches := reFootnoteDef.FindSubmatch(data)
+ for i := range p.notes {
+ if p.notes[i].id == string(matches[1]) {
+ p.notes[i].def = string(matches[2])
+ }
+ }
+ case isTable(data):
+ if inTable != true {
+ inTable = true
+ }
+ tmpBlock.Write(data)
+ tmpBlock.WriteByte('\n')
+ case IsKeyword(data):
+ continue
+ case isComment(data):
+ p.generateComment(&output, data)
+ case isHeadline(data):
+ p.generateHeadline(&output, data)
+ case isDefinitionList(data):
+ if inList != true {
+ listType = "dl"
+ inList = true
+ }
+ var work bytes.Buffer
+ flags := blackfriday.LIST_TYPE_DEFINITION
+ matches := reDefinitionList.FindSubmatch(data)
+ flags |= blackfriday.LIST_TYPE_TERM
+ p.inline(&work, matches[1])
+ p.r.ListItem(&tmpBlock, work.Bytes(), flags)
+ work.Reset()
+ flags &= ^blackfriday.LIST_TYPE_TERM
+ p.inline(&work, matches[2])
+ p.r.ListItem(&tmpBlock, work.Bytes(), flags)
+ case isUnorderedList(data):
+ if inList != true {
+ listType = "ul"
+ inList = true
+ }
+ matches := reUnorderedList.FindSubmatch(data)
+ var work bytes.Buffer
+ p.inline(&work, matches[2])
+ p.r.ListItem(&tmpBlock, work.Bytes(), 0)
+ case isOrderedList(data):
+ if inList != true {
+ listType = "ol"
+ inList = true
+ }
+ matches := reOrderedList.FindSubmatch(data)
+ var work bytes.Buffer
+ tmpBlock.WriteString("<li")
+ if len(matches[2]) > 0 {
+ tmpBlock.WriteString(" value=\"")
+ tmpBlock.Write(matches[2])
+ tmpBlock.WriteString("\"")
+ matches[3] = matches[3][1:]
+ }
+ p.inline(&work, matches[3])
+ tmpBlock.WriteString(">")
+ tmpBlock.Write(work.Bytes())
+ tmpBlock.WriteString("</li>\n")
+ case isHorizontalRule(data):
+ p.r.HRule(&output)
+ case isExampleLine(data):
+ if inParagraph == true {
+ if len(tmpBlock.Bytes()) > 0 {
+ p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1])
+ inParagraph = false
+ }
+ tmpBlock.Reset()
+ }
+ if inFixedWidthArea != true {
+ tmpBlock.WriteString("<pre class=\"example\">\n")
+ inFixedWidthArea = true
+ }
+ matches := reExampleLine.FindSubmatch(data)
+ tmpBlock.Write(matches[1])
+ tmpBlock.WriteString("\n")
+ break
+ default:
+ if inParagraph == false {
+ inParagraph = true
+ if inFixedWidthArea == true {
+ if tmpBlock.Len() > 0 {
+ tmpBlock.WriteString("</pre>")
+ output.Write(tmpBlock.Bytes())
+ }
+ inFixedWidthArea = false
+ tmpBlock.Reset()
+ }
+ }
+ tmpBlock.Write(data)
+ tmpBlock.WriteByte('\n')
+ }
+ }
+
+ if len(tmpBlock.Bytes()) > 0 {
+ if inParagraph == true {
+ p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1])
+ } else if inFixedWidthArea == true {
+ tmpBlock.WriteString("</pre>\n")
+ output.Write(tmpBlock.Bytes())
+ }
+ }
+
+ // Writing footnote def. list
+ if len(p.notes) > 0 {
+ flags := blackfriday.LIST_ITEM_BEGINNING_OF_LIST
+ p.r.Footnotes(&output, func() bool {
+ for i := range p.notes {
+ p.r.FootnoteItem(&output, []byte(p.notes[i].id), []byte(p.notes[i].def), flags)
+ }
+ return true
+ })
+ }
+
+ return output.Bytes()
+}
+
+// Org Syntax has been broken up into 4 distinct sections based on
+// the org-syntax draft (http://orgmode.org/worg/dev/org-syntax.html):
+// - Headlines
+// - Greater Elements
+// - Elements
+// - Objects
+
+// Headlines
+func isHeadline(data []byte) bool {
+ if !charMatches(data[0], '*') {
+ return false
+ }
+ level := 0
+ for level < 6 && charMatches(data[level], '*') {
+ level++
+ }
+ return charMatches(data[level], ' ')
+}
+
+func (p *parser) generateHeadline(out *bytes.Buffer, data []byte) {
+ level := 1
+ status := ""
+ priority := ""
+
+ for level < 6 && data[level] == '*' {
+ level++
+ }
+
+ start := skipChar(data, level, ' ')
+
+ data = data[start:]
+ i := 0
+
+ // Check if has a status so it can be rendered as a separate span that can be hidden or
+ // modified with CSS classes
+ if hasStatus(data[i:4]) {
+ status = string(data[i:4])
+ i += 5 // one extra character for the next whitespace
+ }
+
+ // Check if the next byte is a priority marker
+ if data[i] == '[' && hasPriority(data[i+1]) {
+ priority = string(data[i+1])
+ i += 4 // for "[c]" + ' '
+ }
+
+ tags, tagsFound := findTags(data, i)
+
+ headlineID := sanitized_anchor_name.Create(string(data[i:]))
+
+ generate := func() bool {
+ dataEnd := len(data)
+ if tagsFound > 0 {
+ dataEnd = tagsFound
+ }
+
+ headline := bytes.TrimRight(data[i:dataEnd], " \t")
+
+ if status != "" {
+ out.WriteString("" + status + "")
+ out.WriteByte(' ')
+ }
+
+ if priority != "" {
+ out.WriteString("[" + priority + "]")
+ out.WriteByte(' ')
+ }
+
+ p.inline(out, headline)
+
+ if tagsFound > 0 {
+ for _, tag := range tags {
+ out.WriteByte(' ')
+ out.WriteString("" + tag + "")
+ out.WriteByte(' ')
+ }
+ }
+ return true
+ }
+
+ p.r.Header(out, generate, level, headlineID)
+}
+
+func hasStatus(data []byte) bool {
+ return bytes.Contains(data, []byte("TODO")) || bytes.Contains(data, []byte("DONE"))
+}
+
+func hasPriority(char byte) bool {
+ return (charMatches(char, 'A') || charMatches(char, 'B') || charMatches(char, 'C'))
+}
+
+func findTags(data []byte, start int) ([]string, int) {
+ tags := []string{}
+ tagOpener := 0
+ tagMarker := tagOpener
+ for tIdx := start; tIdx < len(data); tIdx++ {
+ if tagMarker > 0 && data[tIdx] == ':' {
+ tags = append(tags, string(data[tagMarker+1:tIdx]))
+ tagMarker = tIdx
+ }
+ if data[tIdx] == ':' && tagOpener == 0 && data[tIdx-1] == ' ' {
+ tagMarker = tIdx
+ tagOpener = tIdx
+ }
+ }
+ return tags, tagOpener
+}
+
+// Greater Elements
+// ~~ Definition Lists
+var reDefinitionList = regexp.MustCompile(`^\s*-\s+(.+?)\s+::\s+(.*)`)
+
+func isDefinitionList(data []byte) bool {
+ return reDefinitionList.Match(data)
+}
+
+// ~~ Example lines
+var reExampleLine = regexp.MustCompile(`^\s*:\s(\s*.*)|^\s*:$`)
+
+func isExampleLine(data []byte) bool {
+ return reExampleLine.Match(data)
+}
+
+// ~~ Ordered Lists
+var reOrderedList = regexp.MustCompile(`^(\s*)\d+\.\s+\[?@?(\d*)\]?(.+)`)
+
+func isOrderedList(data []byte) bool {
+ return reOrderedList.Match(data)
+}
+
+// ~~ Unordered Lists
+var reUnorderedList = regexp.MustCompile(`^(\s*)[-\+]\s+(.+)`)
+
+func isUnorderedList(data []byte) bool {
+ return reUnorderedList.Match(data)
+}
+
+// ~~ Tables
+var reTableHeaders = regexp.MustCompile(`^[|+-]*$`)
+
+func isTable(data []byte) bool {
+ return charMatches(data[0], '|')
+}
+
+func (p *parser) generateTable(output *bytes.Buffer, data []byte) {
+ var table bytes.Buffer
+ rows := bytes.Split(bytes.Trim(data, "\n"), []byte("\n"))
+ hasTableHeaders := len(rows) > 1
+ if len(rows) > 1 {
+ hasTableHeaders = reTableHeaders.Match(rows[1])
+ }
+ tbodySet := false
+
+ for idx, row := range rows {
+ var rowBuff bytes.Buffer
+ if hasTableHeaders && idx == 0 {
+ table.WriteString("<thead>")
+ for _, cell := range bytes.Split(row[1:len(row)-1], []byte("|")) {
+ p.r.TableHeaderCell(&rowBuff, bytes.Trim(cell, " \t"), 0)
+ }
+ p.r.TableRow(&table, rowBuff.Bytes())
+ table.WriteString("</thead>\n")
+ } else if hasTableHeaders && idx == 1 {
+ continue
+ } else {
+ if !tbodySet {
+ table.WriteString("<tbody>")
+ tbodySet = true
+ }
+ if !reTableHeaders.Match(row) {
+ for _, cell := range bytes.Split(row[1:len(row)-1], []byte("|")) {
+ var cellBuff bytes.Buffer
+ p.inline(&cellBuff, bytes.Trim(cell, " \t"))
+ p.r.TableCell(&rowBuff, cellBuff.Bytes(), 0)
+ }
+ p.r.TableRow(&table, rowBuff.Bytes())
+ }
+ if tbodySet && idx == len(rows)-1 {
+ table.WriteString("</tbody>\n")
+ tbodySet = false
+ }
+ }
+ }
+
+ output.WriteString("\n<table>\n")
+ output.Write(table.Bytes())
+ output.WriteString("</table>\n")
+}
+
+// ~~ Property Drawers
+
+func isPropertyDrawer(data []byte) bool {
+ return bytes.Equal(data, []byte(":PROPERTIES:"))
+}
+
+// ~~ Dynamic Blocks
+var reBlock = regexp.MustCompile(`^#\+(BEGIN|END)_(\w+)\s*([0-9A-Za-z_\-]*)?`)
+
+func isBlock(data []byte) bool {
+ return reBlock.Match(data)
+}
+
+// ~~ Footnotes
+var reFootnoteDef = regexp.MustCompile(`^\[fn:([\w]+)\] +(.+)`)
+
+func isFootnoteDef(data []byte) bool {
+ return reFootnoteDef.Match(data)
+}
+
+// Elements
+// ~~ Keywords
+func IsKeyword(data []byte) bool {
+ return len(data) > 2 && charMatches(data[0], '#') && charMatches(data[1], '+') && !charMatches(data[2], ' ')
+}
+
+// ~~ Comments
+func isComment(data []byte) bool {
+ return charMatches(data[0], '#') && charMatches(data[1], ' ')
+}
+
+func (p *parser) generateComment(out *bytes.Buffer, data []byte) {
+ var work bytes.Buffer
+ work.WriteString("<!-- ")
+ work.Write(data[2:])
+ work.WriteString(" -->")
+ work.WriteByte('\n')
+ out.Write(work.Bytes())
+}
+
+// ~~ Horizontal Rules
+var reHorizontalRule = regexp.MustCompile(`^\s*?-----\s?$`)
+
+func isHorizontalRule(data []byte) bool {
+ return reHorizontalRule.Match(data)
+}
+
+// ~~ Paragraphs
+func (p *parser) generateParagraph(out *bytes.Buffer, data []byte) {
+ generate := func() bool {
+ p.inline(out, bytes.Trim(data, " "))
+ return true
+ }
+ p.r.Paragraph(out, generate)
+}
+
+func (p *parser) generateList(output *bytes.Buffer, data []byte, listType string) {
+ generateList := func() bool {
+ output.WriteByte('\n')
+ p.inline(output, bytes.Trim(data, " "))
+ return true
+ }
+ switch listType {
+ case "ul":
+ p.r.List(output, generateList, 0)
+ case "ol":
+ p.r.List(output, generateList, blackfriday.LIST_TYPE_ORDERED)
+ case "dl":
+ p.r.List(output, generateList, blackfriday.LIST_TYPE_DEFINITION)
+ }
+}
+
+// Objects
+
+func (p *parser) inline(out *bytes.Buffer, data []byte) {
+ i, end := 0, 0
+
+ for i < len(data) {
+ for end < len(data) && p.inlineCallback[data[end]] == nil {
+ end++
+ }
+
+ p.r.Entity(out, data[i:end])
+
+ if end >= len(data) {
+ break
+ }
+ i = end
+
+ handler := p.inlineCallback[data[i]]
+
+ if consumed := handler(p, out, data, i); consumed > 0 {
+ i += consumed
+ end = i
+ continue
+ }
+
+ end = i + 1
+ }
+}
+
+func isAcceptablePreOpeningChar(dataIn, data []byte, offset int) bool {
+ if len(dataIn) == len(data) {
+ return true
+ }
+
+ char := dataIn[offset-1]
+ return charMatches(char, ' ') || isPreChar(char)
+}
+
+func isPreChar(char byte) bool {
+ return charMatches(char, '>') || charMatches(char, '(') || charMatches(char, '{') || charMatches(char, '[')
+}
+
+func isAcceptablePostClosingChar(char byte) bool {
+ return charMatches(char, ' ') || isTerminatingChar(char)
+}
+
+func isTerminatingChar(char byte) bool {
+ return charMatches(char, '.') || charMatches(char, ',') || charMatches(char, '?') || charMatches(char, '!') || charMatches(char, ')') || charMatches(char, '}') || charMatches(char, ']')
+}
+
+func findLastCharInInline(data []byte, char byte) int {
+ timesFound := 0
+ last := 0
+ // Start from character after the inline indicator
+ for i := 1; i < len(data); i++ {
+ if timesFound == 1 {
+ break
+ }
+ if data[i] == char {
+ if len(data) == i+1 || (len(data) > i+1 && isAcceptablePostClosingChar(data[i+1])) {
+ last = i
+ timesFound += 1
+ }
+ }
+ }
+ return last
+}
+
+func generator(p *parser, out *bytes.Buffer, dataIn []byte, offset int, char byte, doInline bool, renderer func(*bytes.Buffer, []byte)) int {
+ data := dataIn[offset:]
+ c := byte(char)
+ start := 1
+ i := start
+ if len(data) <= 1 {
+ return 0
+ }
+
+ lastCharInside := findLastCharInInline(data, c)
+
+ // Org mode spec says a non-whitespace character must immediately follow.
+ // if the current char is the marker, then there's no text between, not a candidate
+ if isSpace(data[i]) || lastCharInside == i || !isAcceptablePreOpeningChar(dataIn, data, offset) {
+ return 0
+ }
+
+ if lastCharInside > 0 {
+ var work bytes.Buffer
+ if doInline {
+ p.inline(&work, data[start:lastCharInside])
+ renderer(out, work.Bytes())
+ } else {
+ renderer(out, data[start:lastCharInside])
+ }
+ next := lastCharInside + 1
+ return next
+ }
+
+ return 0
+}
+
+// ~~ Text Markup
+func generateVerbatim(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ return generator(p, out, data, offset, '=', false, p.r.CodeSpan)
+}
+
+func generateCode(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ return generator(p, out, data, offset, '~', false, p.r.CodeSpan)
+}
+
+func generateEmphasis(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ return generator(p, out, data, offset, '/', true, p.r.Emphasis)
+}
+
+func generateUnderline(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ underline := func(out *bytes.Buffer, text []byte) {
+ out.WriteString("")
+ out.Write(text)
+ out.WriteString("")
+ }
+
+ return generator(p, out, data, offset, '_', true, underline)
+}
+
+func generateBold(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ return generator(p, out, data, offset, '*', true, p.r.DoubleEmphasis)
+}
+
+func generateStrikethrough(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ return generator(p, out, data, offset, '+', true, p.r.StrikeThrough)
+}
+
+// ~~ Images and Links (inc. Footnote)
+var reLinkOrImg = regexp.MustCompile(`\[\[(.+?)\]\[?(.*?)\]?\]`)
+
+func generateLinkOrImg(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+ data = data[offset+1:]
+ start := 1
+ i := start
+ var hyperlink []byte
+ isImage := false
+ isFootnote := false
+ closedLink := false
+ hasContent := false
+
+ if bytes.Equal(data[0:3], []byte("fn:")) {
+ isFootnote = true
+ } else if data[0] != '[' {
+ return 0
+ }
+
+ if bytes.Equal(data[1:6], []byte("file:")) {
+ isImage = true
+ }
+
+ for i < len(data) {
+ currChar := data[i]
+ switch {
+ case charMatches(currChar, ']') && closedLink == false:
+ if isImage {
+ hyperlink = data[start+5 : i]
+ } else if isFootnote {
+ refid := data[start+2 : i]
+ if bytes.Equal(refid, bytes.Trim(refid, " ")) {
+ p.notes = append(p.notes, footnotes{string(refid), "DEFINITION NOT FOUND"})
+ p.r.FootnoteRef(out, refid, len(p.notes))
+ return i + 2
+ } else {
+ return 0
+ }
+ } else if bytes.Equal(data[i-4:i], []byte(".org")) {
+ orgStart := start
+ if bytes.Equal(data[orgStart:orgStart+2], []byte("./")) {
+ orgStart = orgStart + 1
+ }
+ hyperlink = data[orgStart : i-4]
+ } else {
+ hyperlink = data[start:i]
+ }
+ closedLink = true
+ case charMatches(currChar, '['):
+ start = i + 1
+ hasContent = true
+ case charMatches(currChar, ']') && closedLink == true && hasContent == true && isImage == true:
+ p.r.Image(out, hyperlink, data[start:i], data[start:i])
+ return i + 3
+ case charMatches(currChar, ']') && closedLink == true && hasContent == true:
+ var tmpBuf bytes.Buffer
+ p.inline(&tmpBuf, data[start:i])
+ p.r.Link(out, hyperlink, tmpBuf.Bytes(), tmpBuf.Bytes())
+ return i + 3
+ case charMatches(currChar, ']') && closedLink == true && hasContent == false && isImage == true:
+ p.r.Image(out, hyperlink, hyperlink, hyperlink)
+ return i + 2
+ case charMatches(currChar, ']') && closedLink == true && hasContent == false:
+ p.r.Link(out, hyperlink, hyperlink, hyperlink)
+ return i + 2
+ }
+ i++
+ }
+
+ return 0
+}
+
+// Helpers
+func skipChar(data []byte, start int, char byte) int {
+ i := start
+ for i < len(data) && charMatches(data[i], char) {
+ i++
+ }
+ return i
+}
+
+func isSpace(char byte) bool {
+ return charMatches(char, ' ')
+}
+
+func isEmpty(data []byte) bool {
+ if len(data) == 0 {
+ return true
+ }
+
+ for i := 0; i < len(data) && !charMatches(data[i], '\n'); i++ {
+ if !charMatches(data[i], ' ') && !charMatches(data[i], '\t') {
+ return false
+ }
+ }
+ return true
+}
+
+func charMatches(a byte, b byte) bool {
+ return a == b
+}
diff --git a/vendor/github.com/chaseadamsio/goorgeous/gopher.gif b/vendor/github.com/chaseadamsio/goorgeous/gopher.gif
new file mode 100644
index 0000000000..be7567e3cf
Binary files /dev/null and b/vendor/github.com/chaseadamsio/goorgeous/gopher.gif differ
diff --git a/vendor/github.com/chaseadamsio/goorgeous/gopher_small.gif b/vendor/github.com/chaseadamsio/goorgeous/gopher_small.gif
new file mode 100644
index 0000000000..1cd31fdd0c
Binary files /dev/null and b/vendor/github.com/chaseadamsio/goorgeous/gopher_small.gif differ
diff --git a/vendor/github.com/chaseadamsio/goorgeous/header.go b/vendor/github.com/chaseadamsio/goorgeous/header.go
new file mode 100644
index 0000000000..66e8b99321
--- /dev/null
+++ b/vendor/github.com/chaseadamsio/goorgeous/header.go
@@ -0,0 +1,70 @@
+package goorgeous
+
+import (
+ "bufio"
+ "bytes"
+ "regexp"
+ "strings"
+)
+
+// ExtractOrgHeaders finds and returns all of the headers
+// from a bufio.Reader and returns them as their own byte slice
+func ExtractOrgHeaders(r *bufio.Reader) (fm []byte, err error) {
+ var out bytes.Buffer
+ endOfHeaders := true
+ for endOfHeaders {
+ p, err := r.Peek(2)
+ if err != nil {
+ return nil, err
+ }
+ if !charMatches(p[0], '#') && !charMatches(p[1], '+') {
+ endOfHeaders = false
+ break
+ }
+ line, _, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ out.Write(line)
+ out.WriteByte('\n')
+ }
+ return out.Bytes(), nil
+}
+
+var reHeader = regexp.MustCompile(`^#\+(\w+?): (.*)`)
+
+// OrgHeaders find all of the headers from a byte slice and returns
+// them as a map of string interface
+func OrgHeaders(input []byte) (map[string]interface{}, error) {
+ out := make(map[string]interface{})
+ scanner := bufio.NewScanner(bytes.NewReader(input))
+
+ for scanner.Scan() {
+ data := scanner.Bytes()
+ if !charMatches(data[0], '#') && !charMatches(data[1], '+') {
+ return out, nil
+ }
+ matches := reHeader.FindSubmatch(data)
+
+ if len(matches) < 3 {
+ continue
+ }
+
+ key := string(matches[1])
+ val := matches[2]
+ switch {
+ case strings.ToLower(key) == "tags" || strings.ToLower(key) == "categories" || strings.ToLower(key) == "aliases":
+ bTags := bytes.Split(val, []byte(" "))
+ tags := make([]string, len(bTags))
+ for idx, tag := range bTags {
+ tags[idx] = string(tag)
+ }
+ out[key] = tags
+ default:
+ out[key] = string(val)
+ }
+
+ }
+ return out, nil
+
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index d9cb18cd1a..101c72c217 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -299,6 +299,12 @@
"revision": "fb1f79c6b65acda83063cbc69f6bba1522558bfc",
"revisionTime": "2016-01-17T19:21:50Z"
},
+ {
+ "checksumSHA1": "x1svIugw39oEZGU5/HMUHzgRUZM=",
+ "path": "github.com/chaseadamsio/goorgeous",
+ "revision": "098da33fde5f9220736531b3cb26a2dec86a8367",
+ "revisionTime": "2017-09-01T13:22:37Z"
+ },
{
"checksumSHA1": "agNqSytP0indDCoGizlMyC1L/m4=",
"path": "github.com/coreos/etcd/error",