diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 97c75c684a..0000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,38 +0,0 @@ -version: 2 - -jobs: - - validate: - working_directory: /work - docker: [{image: 'docker:20.10-git'}] - environment: - DOCKER_BUILDKIT: 1 - steps: - - checkout - - setup_remote_docker: - version: 20.10.6 - reusable: true - exclusive: false - - run: - name: "Docker version" - command: docker version - - run: - name: "Docker info" - command: docker info - - run: - name: "Validate - build image" - command: | - rm -f .dockerignore # include .git - docker build --progress=plain -f dockerfiles/Dockerfile.dev --tag cli-builder-with-git:$CIRCLE_BUILD_NUM . - - run: - name: "Validate Vendor, Docs, and Code Generation" - command: | - docker run --rm cli-builder-with-git:$CIRCLE_BUILD_NUM \ - make ci-validate - no_output_timeout: 15m - -workflows: - version: 2 - ci: - jobs: - - validate diff --git a/.dockerignore b/.dockerignore index b6b792ec3c..57b2e28bf1 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,8 +2,13 @@ /cli/winresources/versioninfo.json /cli/winresources/*.syso /man/man*/ -/docs/yaml/gen/ +/man/vendor/ +/man/go.sum +/docs/yaml/ +/docs/vendor/ +/docs/go.sum profile.out # top-level go.mod is not meant to be checked in /go.mod +/go.sum diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml index 401fb12bfb..d05b25249b 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validate.yml @@ -20,6 +20,7 @@ jobs: - lint - shellcheck - validate-vendor + - update-authors # ensure authors update target runs fine steps: - name: Checkout @@ -31,3 +32,23 @@ jobs: uses: docker/bake-action@v1 with: targets: ${{ matrix.target }} + + validate-make: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + target: + - yamldocs # ensure yamldocs target runs fine + - manpages # ensure manpages target runs fine + steps: + - + name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - + name: Run + shell: 'script --return --quiet --command "bash {0}"' + run: | + make -f docker.Makefile ${{ matrix.target }} diff --git a/.gitignore b/.gitignore index 7122f485a5..a7462428a4 100644 --- a/.gitignore +++ b/.gitignore @@ -10,11 +10,8 @@ Thumbs.db /build/ /cli/winresources/versioninfo.json /cli/winresources/*.syso -/man/man1/ -/man/man5/ -/man/man8/ -/docs/yaml/gen/ profile.out # top-level go.mod is not meant to be checked in /go.mod +/go.sum diff --git a/Makefile b/Makefile index ff68f19063..afd88b5998 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ _:=$(shell ./scripts/warn-outside-container $(MAKECMDGOALS)) .PHONY: clean clean: ## remove build artifacts - rm -rf ./build/* cli/winresources/rsrc_* ./man/man[1-9] docs/yaml/gen + rm -rf ./build/* man/man[1-9] docs/yaml .PHONY: test test: test-unit ## run tests @@ -77,8 +77,3 @@ yamldocs: ## generate documentation YAML files consumed by docs repo .PHONY: help help: ## print this help @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) - -.PHONY: ci-validate -ci-validate: - time make manpages - time make yamldocs diff --git a/README.md b/README.md index 4ce0d6ed98..0a63f23dce 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,8 @@ # Docker CLI [![PkgGoDev](https://img.shields.io/badge/go.dev-docs-007d9c?logo=go&logoColor=white)](https://pkg.go.dev/github.com/docker/cli) -[![Build 
Status](https://img.shields.io/github/workflow/status/docker/cli/build?logo=github)](https://github.com/docker/cli/actions?query=workflow%3Abuild) -[![Test Status](https://img.shields.io/github/workflow/status/docker/cli/test?logo=github)](https://github.com/docker/cli/actions?query=workflow%3Atest) -[![CircleCI Status](https://img.shields.io/circleci/build/github/docker/cli/master?logo=circleci)](https://circleci.com/gh/docker/cli/tree/master) +[![Build Status](https://img.shields.io/github/workflow/status/docker/cli/build?label=build&logo=github)](https://github.com/docker/cli/actions?query=workflow%3Abuild) +[![Test Status](https://img.shields.io/github/workflow/status/docker/cli/test?label=test&logo=github)](https://github.com/docker/cli/actions?query=workflow%3Atest) [![Go Report Card](https://goreportcard.com/badge/github.com/docker/cli)](https://goreportcard.com/report/github.com/docker/cli) [![Codecov](https://codecov.io/gh/docker/cli/branch/master/graph/badge.svg)](https://codecov.io/gh/docker/cli) diff --git a/docker-bake.hcl b/docker-bake.hcl index 731ed0cc64..53842a4f98 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -1,3 +1,6 @@ +variable "GO_VERSION" { + default = "1.16.11" +} variable "VERSION" { default = "" } @@ -16,11 +19,19 @@ variable "COMPANY_NAME" { default = "" } +target "_common" { + args = { + GO_VERSION = GO_VERSION + BUILDKIT_CONTEXT_KEEP_GIT_DIR = 1 + } +} + group "default" { targets = ["binary"] } target "binary" { + inherits = ["_common"] target = "binary" platforms = ["local"] output = ["build"] @@ -40,6 +51,7 @@ target "dynbinary" { } target "plugins" { + inherits = ["_common"] target = "plugins" platforms = ["local"] output = ["build"] @@ -67,30 +79,35 @@ target "plugins-cross" { } target "lint" { + inherits = ["_common"] dockerfile = "./dockerfiles/Dockerfile.lint" target = "lint" output = ["type=cacheonly"] } target "shellcheck" { + inherits = ["_common"] dockerfile = "./dockerfiles/Dockerfile.shellcheck" target = "shellcheck" output = ["type=cacheonly"] } target "validate-vendor" { + inherits = ["_common"] dockerfile = "./dockerfiles/Dockerfile.vendor" target = "validate" output = ["type=cacheonly"] } target "update-vendor" { + inherits = ["_common"] dockerfile = "./dockerfiles/Dockerfile.vendor" target = "update" output = ["."] } target "mod-outdated" { + inherits = ["_common"] dockerfile = "./dockerfiles/Dockerfile.vendor" target = "outdated" args = { @@ -100,6 +117,20 @@ target "mod-outdated" { output = ["type=cacheonly"] } +target "validate-authors" { + inherits = ["_common"] + dockerfile = "./dockerfiles/Dockerfile.authors" + target = "validate" + output = ["type=cacheonly"] +} + +target "update-authors" { + inherits = ["_common"] + dockerfile = "./dockerfiles/Dockerfile.authors" + target = "update" + output = ["."] +} + target "test" { target = "test" output = ["type=cacheonly"] diff --git a/docker.Makefile b/docker.Makefile index 18f3159973..e1848a4fc7 100644 --- a/docker.Makefile +++ b/docker.Makefile @@ -97,7 +97,7 @@ mod-outdated: ## check outdated dependencies .PHONY: authors authors: ## generate AUTHORS file from git history - $(DOCKER_RUN) -it $(DEV_DOCKER_IMAGE_NAME) make authors + docker buildx bake update-authors .PHONY: manpages manpages: build_docker_image ## generate man pages from go source and markdown diff --git a/dockerfiles/Dockerfile.authors b/dockerfiles/Dockerfile.authors new file mode 100644 index 0000000000..71cd1decbf --- /dev/null +++ b/dockerfiles/Dockerfile.authors @@ -0,0 +1,23 @@ +# 
syntax=docker/dockerfile:1.3-labs
+
+FROM alpine:3.14 AS gen
+RUN apk add --no-cache bash git
+WORKDIR /src
+RUN --mount=type=bind,target=. \
+  mkdir /out && ./scripts/docs/generate-authors.sh /out
+
+FROM scratch AS update
+COPY --from=gen /out /
+
+FROM gen AS validate
+RUN --mount=type=bind,target=.,rw <<EOT
+set -e
+git add -A
+cp -rf /out/* .
+diff=$(git status --porcelain -- AUTHORS)
+if [ -n "$diff" ]; then
+  echo >&2 'ERROR: Authors result differs. Please update with "make -f docker.Makefile authors"'
+  echo "$diff"
+  exit 1
+fi
+EOT
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000000..aea9c0212c
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,3 @@
+/vendor
+/yaml
+/go.sum
diff --git a/docs/README.md b/docs/README.md
index a161b8903a..e4725474a1 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -28,3 +28,9 @@ the place to edit them.
 
 The docs in the general repo are open-source and we appreciate
 your feedback and pull requests!
+
+# Generate docs
+
+```shell
+$ make -f docker.Makefile yamldocs
+```
diff --git a/docs/generate.go b/docs/generate.go
new file mode 100644
index 0000000000..a95dd42a81
--- /dev/null
+++ b/docs/generate.go
@@ -0,0 +1,67 @@
+// This file is intended for use with "go run"; it isn't really part of the package.
+
+// +build docsgen
+
+package main
+
+import (
+	"log"
+	"os"
+
+	clidocstool "github.com/docker/cli-docs-tool"
+	"github.com/docker/cli/cli/command"
+	"github.com/docker/cli/cli/command/commands"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+)
+
+const defaultSourcePath = "docs/reference/commandline/"
+
+type options struct {
+	source string
+	target string
+}
+
+func gen(opts *options) error {
+	log.SetFlags(0)
+
+	dockerCLI, err := command.NewDockerCli()
+	if err != nil {
+		return err
+	}
+	cmd := &cobra.Command{
+		Use:   "docker [OPTIONS] COMMAND [ARG...]",
+		Short: "The base command for the Docker CLI.",
+	}
+	commands.AddCommands(cmd, dockerCLI)
+
+	c, err := clidocstool.New(clidocstool.Options{
+		Root:      cmd,
+		SourceDir: opts.source,
+		TargetDir: opts.target,
+		Plugin:    false,
+	})
+	if err != nil {
+		return err
+	}
+
+	return c.GenYamlTree(cmd)
+}
+
+func run() error {
+	opts := &options{}
+	flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
+	flags.StringVar(&opts.source, "source", defaultSourcePath, "Docs source folder")
+	flags.StringVar(&opts.target, "target", defaultSourcePath, "Docs target folder")
+	if err := flags.Parse(os.Args[1:]); err != nil {
+		return err
+	}
+	return gen(opts)
+}
+
+func main() {
+	if err := run(); err != nil {
+		log.Printf("ERROR: %+v", err)
+		os.Exit(1)
+	}
+}
diff --git a/docs/go.mod b/docs/go.mod
new file mode 100644
index 0000000000..82b428db7e
--- /dev/null
+++ b/docs/go.mod
@@ -0,0 +1,13 @@
+module github.com/docker/cli/docs
+
+// dummy go.mod to avoid dealing with dependencies specific
+// to docs generation and not really part of the project.
+ +go 1.16 + +//require ( +// github.com/docker/cli v0.0.0+incompatible +// github.com/docker/cli-docs-tool v0.3.0 +//) +// +//replace github.com/docker/cli v0.0.0+incompatible => ../ diff --git a/docs/tools.go b/docs/tools.go new file mode 100644 index 0000000000..23f0eaff75 --- /dev/null +++ b/docs/tools.go @@ -0,0 +1,7 @@ +// +build tools + +package main + +import ( + _ "github.com/docker/cli-docs-tool" +) diff --git a/docs/yaml/Dockerfile b/docs/yaml/Dockerfile deleted file mode 100644 index 059b97a917..0000000000 --- a/docs/yaml/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM scratch -COPY docs /docs -# CMD cannot be nil so we set it to empty string -CMD [""] diff --git a/docs/yaml/generate.go b/docs/yaml/generate.go deleted file mode 100644 index 1e0472c39c..0000000000 --- a/docs/yaml/generate.go +++ /dev/null @@ -1,116 +0,0 @@ -package main - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - - "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/commands" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -const descriptionSourcePath = "docs/reference/commandline/" - -func generateCliYaml(opts *options) error { - dockerCLI, err := command.NewDockerCli() - if err != nil { - return err - } - cmd := &cobra.Command{ - Use: "docker [OPTIONS] COMMAND [ARG...]", - Short: "The base command for the Docker CLI.", - } - commands.AddCommands(cmd, dockerCLI) - disableFlagsInUseLine(cmd) - source := filepath.Join(opts.source, descriptionSourcePath) - fmt.Println("Markdown source:", source) - if err := loadLongDescription(cmd, source); err != nil { - return err - } - - if err := os.MkdirAll(opts.target, 0755); err != nil { - return err - } - - cmd.DisableAutoGenTag = true - return GenYamlTree(cmd, opts.target) -} - -func disableFlagsInUseLine(cmd *cobra.Command) { - visitAll(cmd, func(ccmd *cobra.Command) { - // do not add a `[flags]` to the end of the usage line. - ccmd.DisableFlagsInUseLine = true - }) -} - -// visitAll will traverse all commands from the root. -// This is different from the VisitAll of cobra.Command where only parents -// are checked. 
-func visitAll(root *cobra.Command, fn func(*cobra.Command)) { - for _, cmd := range root.Commands() { - visitAll(cmd, fn) - } - fn(root) -} - -func loadLongDescription(parentCmd *cobra.Command, path string) error { - for _, cmd := range parentCmd.Commands() { - if cmd.HasSubCommands() { - if err := loadLongDescription(cmd, path); err != nil { - return err - } - } - name := cmd.CommandPath() - log.Println("INFO: Generating docs for", name) - if i := strings.Index(name, " "); i >= 0 { - // remove root command / binary name - name = name[i+1:] - } - if name == "" { - continue - } - mdFile := strings.ReplaceAll(name, " ", "_") + ".md" - fullPath := filepath.Join(path, mdFile) - content, err := os.ReadFile(fullPath) - if os.IsNotExist(err) { - log.Printf("WARN: %s does not exist, skipping\n", mdFile) - continue - } - if err != nil { - return err - } - applyDescriptionAndExamples(cmd, string(content)) - } - return nil -} - -type options struct { - source string - target string -} - -func parseArgs() (*options, error) { - opts := &options{} - cwd, _ := os.Getwd() - flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError) - flags.StringVar(&opts.source, "root", cwd, "Path to project root") - flags.StringVar(&opts.target, "target", "/tmp", "Target path for generated yaml files") - err := flags.Parse(os.Args[1:]) - return opts, err -} - -func main() { - opts, err := parseArgs() - if err != nil { - log.Println(err) - } - fmt.Println("Project root: ", opts.source) - fmt.Println("YAML output dir:", opts.target) - if err := generateCliYaml(opts); err != nil { - log.Println("Failed to generate yaml files:", err) - } -} diff --git a/docs/yaml/markdown.go b/docs/yaml/markdown.go deleted file mode 100644 index 0ad78613a8..0000000000 --- a/docs/yaml/markdown.go +++ /dev/null @@ -1,73 +0,0 @@ -package main - -import ( - "regexp" - "strings" - "unicode" -) - -var ( - // mdHeading matches MarkDown H1..h6 headings. Note that this regex may produce - // false positives for (e.g.) comments in code-blocks (# this is a comment), - // so should not be used as a generic regex for other purposes. - mdHeading = regexp.MustCompile(`^([#]{1,6})\s(.*)$`) - // htmlAnchor matches inline HTML anchors. This is intended to only match anchors - // for our use-case; DO NOT consider using this as a generic regex, or at least - // not before reading https://stackoverflow.com/a/1732454/1811501. - htmlAnchor = regexp.MustCompile(`\s*`) -) - -// getSections returns all H2 sections by title (lowercase) -func getSections(mdString string) map[string]string { - parsedContent := strings.Split("\n"+mdString, "\n## ") - sections := make(map[string]string, len(parsedContent)) - for _, s := range parsedContent { - if strings.HasPrefix(s, "#") { - // not a H2 Section - continue - } - parts := strings.SplitN(s, "\n", 2) - if len(parts) == 2 { - sections[strings.ToLower(parts[0])] = parts[1] - } - } - return sections -} - -// cleanupMarkDown cleans up the MarkDown passed in mdString for inclusion in -// YAML. It removes trailing whitespace and substitutes tabs for four spaces -// to prevent YAML switching to use "compact" form; ("line1 \nline\t2\n") -// which, although equivalent, is hard to read. 
-func cleanupMarkDown(mdString string) (md string, anchors []string) { - // remove leading/trailing whitespace, and replace tabs in the whole content - mdString = strings.TrimSpace(mdString) - mdString = strings.ReplaceAll(mdString, "\t", " ") - mdString = strings.ReplaceAll(mdString, "https://docs.docker.com", "") - - var id string - // replace trailing whitespace per line, and handle custom anchors - lines := strings.Split(mdString, "\n") - for i := 0; i < len(lines); i++ { - lines[i] = strings.TrimRightFunc(lines[i], unicode.IsSpace) - lines[i], id = convertHTMLAnchor(lines[i]) - if id != "" { - anchors = append(anchors, id) - } - } - return strings.Join(lines, "\n"), anchors -} - -// convertHTMLAnchor converts inline anchor-tags in headings () -// to an extended-markdown property ({#myanchor}). Extended Markdown properties -// are not supported in GitHub Flavored Markdown, but are supported by Jekyll, -// and lead to cleaner HTML in our docs, and prevents duplicate anchors. -// It returns the converted MarkDown heading and the custom ID (if present) -func convertHTMLAnchor(mdLine string) (md string, customID string) { - if m := mdHeading.FindStringSubmatch(mdLine); len(m) > 0 { - if a := htmlAnchor.FindStringSubmatch(m[2]); len(a) > 0 { - customID = a[1] - mdLine = m[1] + " " + htmlAnchor.ReplaceAllString(m[2], "") + " {#" + customID + "}" - } - } - return mdLine, customID -} diff --git a/docs/yaml/markdown_test.go b/docs/yaml/markdown_test.go deleted file mode 100644 index 1d244c9662..0000000000 --- a/docs/yaml/markdown_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package main - -import "testing" - -func TestCleanupMarkDown(t *testing.T) { - tests := []struct { - doc, in, expected string - }{ - { - doc: "whitespace around sections", - in: ` - - ## Section start - -Some lines. -And more lines. - -`, - expected: `## Section start - -Some lines. -And more lines.`, - }, - { - doc: "lines with inline tabs", - in: `## Some Heading - -A line with tabs in it. -Tabs should be replaced by spaces`, - expected: `## Some Heading - -A line with tabs in it. -Tabs should be replaced by spaces`, - }, - { - doc: "lines with trailing spaces", - in: `## Some Heading with spaces - -This is a line. - This is an indented line - -### Some other heading - -Last line.`, - expected: `## Some Heading with spaces - -This is a line. - This is an indented line - -### Some other heading - -Last line.`, - }, - { - doc: "lines with trailing tabs", - in: `## Some Heading with tabs - -This is a line. - This is an indented line - -### Some other heading - -Last line.`, - expected: `## Some Heading with tabs - -This is a line. 
- This is an indented line - -### Some other heading - -Last line.`, - }, - } - for _, tc := range tests { - tc := tc - t.Run(tc.doc, func(t *testing.T) { - out, _ := cleanupMarkDown(tc.in) - if out != tc.expected { - t.Fatalf("\nexpected:\n%q\nactual:\n%q\n", tc.expected, out) - } - }) - } -} - -func TestConvertHTMLAnchor(t *testing.T) { - tests := []struct { - in, id, expected string - }{ - { - in: `# Heading 1`, - id: "heading1", - expected: `# Heading 1 {#heading1}`, - }, - { - in: `## Heading 2 `, - id: "heading2", - expected: `## Heading 2 {#heading2}`, - }, - { - in: `### Heading 3`, - id: "heading3", - expected: `### Heading 3 {#heading3}`, - }, - { - in: `#### Heading 4`, - id: "heading4", - expected: `#### Heading 4 {#heading4}`, - }, - { - in: `##### Heading 5`, - id: "heading5", - expected: `##### Heading 5 {#heading5}`, - }, - { - in: `###### hello!Heading 6`, - id: "", - expected: `###### hello!Heading 6`, - }, - } - for _, tc := range tests { - tc := tc - t.Run(tc.in, func(t *testing.T) { - out, id := convertHTMLAnchor(tc.in) - if id != tc.id { - t.Fatalf("expected: %s, actual: %s\n", tc.id, id) - } - if out != tc.expected { - t.Fatalf("\nexpected: %s\nactual: %s\n", tc.expected, out) - } - }) - } -} diff --git a/docs/yaml/yaml.go b/docs/yaml/yaml.go deleted file mode 100644 index 9fb3871917..0000000000 --- a/docs/yaml/yaml.go +++ /dev/null @@ -1,347 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" - yaml "gopkg.in/yaml.v2" -) - -type cmdOption struct { - Option string - Shorthand string `yaml:",omitempty"` - ValueType string `yaml:"value_type,omitempty"` - DefaultValue string `yaml:"default_value,omitempty"` - Description string `yaml:",omitempty"` - DetailsURL string `yaml:"details_url,omitempty"` // DetailsURL contains an anchor-id or link for more information on this flag - Deprecated bool - MinAPIVersion string `yaml:"min_api_version,omitempty"` - Experimental bool - ExperimentalCLI bool - Kubernetes bool - Swarm bool - OSType string `yaml:"os_type,omitempty"` -} - -type cmdDoc struct { - Name string `yaml:"command"` - SeeAlso []string `yaml:"parent,omitempty"` - Version string `yaml:"engine_version,omitempty"` - Aliases string `yaml:",omitempty"` - Short string `yaml:",omitempty"` - Long string `yaml:",omitempty"` - Usage string `yaml:",omitempty"` - Pname string `yaml:",omitempty"` - Plink string `yaml:",omitempty"` - Cname []string `yaml:",omitempty"` - Clink []string `yaml:",omitempty"` - Options []cmdOption `yaml:",omitempty"` - InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"` - Example string `yaml:"examples,omitempty"` - Deprecated bool - MinAPIVersion string `yaml:"min_api_version,omitempty"` - Experimental bool - ExperimentalCLI bool - Kubernetes bool - Swarm bool - OSType string `yaml:"os_type,omitempty"` -} - -// GenYamlTree creates yaml structured ref files -func GenYamlTree(cmd *cobra.Command, dir string) error { - emptyStr := func(s string) string { return "" } - return GenYamlTreeCustom(cmd, dir, emptyStr) -} - -// GenYamlTreeCustom creates yaml structured ref files -func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string) error { - for _, c := range cmd.Commands() { - if !c.Runnable() && !c.HasAvailableSubCommands() { - // skip non-runnable commands without subcommands - // but *do* generate YAML for hidden and deprecated commands - // the YAML will have those included as metadata, so that the - // 
documentation repository can decide whether or not to present them - continue - } - if err := GenYamlTreeCustom(c, dir, filePrepender); err != nil { - return err - } - } - - // TODO: conditionally skip the root command (for plugins) - // - // The "root" command used in the generator is just a "stub", and only has a - // list of subcommands, but not (e.g.) global options/flags. We should fix - // that, so that the YAML file for the docker "root" command contains the - // global flags. - // - // If we're using this code to generate YAML docs for a plugin, the root- - // command is even less useful; in that case, the root command represents - // the "docker" command, and is a "dummy" with no flags, and only a single - // subcommand (the plugin's top command). For plugins, we should skip the - // root command altogether, to prevent generating a useless YAML file. - // if !cmd.HasParent() { - // return nil - // } - - basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml" - filename := filepath.Join(dir, basename) - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - if _, err := io.WriteString(f, filePrepender(filename)); err != nil { - return err - } - return GenYamlCustom(cmd, f) -} - -// GenYamlCustom creates custom yaml output -// nolint: gocyclo -func GenYamlCustom(cmd *cobra.Command, w io.Writer) error { - const ( - // shortMaxWidth is the maximum width for the "Short" description before - // we force YAML to use multi-line syntax. The goal is to make the total - // width fit within 80 characters. This value is based on 80 characters - // minus the with of the field, colon, and whitespace ('short: '). - shortMaxWidth = 73 - - // longMaxWidth is the maximum width for the "Short" description before - // we force YAML to use multi-line syntax. The goal is to make the total - // width fit within 80 characters. This value is based on 80 characters - // minus the with of the field, colon, and whitespace ('long: '). 
- longMaxWidth = 74 - ) - - cliDoc := cmdDoc{ - Name: cmd.CommandPath(), - Aliases: strings.Join(cmd.Aliases, ", "), - Short: forceMultiLine(cmd.Short, shortMaxWidth), - Long: forceMultiLine(cmd.Long, longMaxWidth), - Example: cmd.Example, - Deprecated: len(cmd.Deprecated) > 0, - } - - if len(cliDoc.Long) == 0 { - cliDoc.Long = cliDoc.Short - } - - if cmd.Runnable() { - cliDoc.Usage = cmd.UseLine() - } - - // Check recursively so that, e.g., `docker stack ls` returns the same output as `docker stack` - for curr := cmd; curr != nil; curr = curr.Parent() { - if v, ok := curr.Annotations["version"]; ok && cliDoc.MinAPIVersion == "" { - cliDoc.MinAPIVersion = v - } - if _, ok := curr.Annotations["experimental"]; ok && !cliDoc.Experimental { - cliDoc.Experimental = true - } - if _, ok := curr.Annotations["experimentalCLI"]; ok && !cliDoc.ExperimentalCLI { - cliDoc.ExperimentalCLI = true - } - if _, ok := curr.Annotations["kubernetes"]; ok && !cliDoc.Kubernetes { - cliDoc.Kubernetes = true - } - if _, ok := curr.Annotations["swarm"]; ok && !cliDoc.Swarm { - cliDoc.Swarm = true - } - if o, ok := curr.Annotations["ostype"]; ok && cliDoc.OSType == "" { - cliDoc.OSType = o - } - } - - anchors := make(map[string]struct{}) - if a, ok := cmd.Annotations["anchors"]; ok && a != "" { - for _, anchor := range strings.Split(a, ",") { - anchors[anchor] = struct{}{} - } - } - - flags := cmd.NonInheritedFlags() - if flags.HasFlags() { - cliDoc.Options = genFlagResult(flags, anchors) - } - flags = cmd.InheritedFlags() - if flags.HasFlags() { - cliDoc.InheritedOptions = genFlagResult(flags, anchors) - } - - if hasSeeAlso(cmd) { - if cmd.HasParent() { - parent := cmd.Parent() - cliDoc.Pname = parent.CommandPath() - cliDoc.Plink = strings.Replace(cliDoc.Pname, " ", "_", -1) + ".yaml" - cmd.VisitParents(func(c *cobra.Command) { - if c.DisableAutoGenTag { - cmd.DisableAutoGenTag = c.DisableAutoGenTag - } - }) - } - - children := cmd.Commands() - sort.Sort(byName(children)) - - for _, child := range children { - if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { - continue - } - cliDoc.Cname = append(cliDoc.Cname, cliDoc.Name+" "+child.Name()) - cliDoc.Clink = append(cliDoc.Clink, strings.Replace(cliDoc.Name+"_"+child.Name(), " ", "_", -1)+".yaml") - } - } - - final, err := yaml.Marshal(&cliDoc) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - if _, err := fmt.Fprintln(w, string(final)); err != nil { - return err - } - return nil -} - -func genFlagResult(flags *pflag.FlagSet, anchors map[string]struct{}) []cmdOption { - var ( - result []cmdOption - opt cmdOption - ) - - const ( - // shortMaxWidth is the maximum width for the "Short" description before - // we force YAML to use multi-line syntax. The goal is to make the total - // width fit within 80 characters. This value is based on 80 characters - // minus the with of the field, colon, and whitespace (' default_value: '). - defaultValueMaxWidth = 64 - - // longMaxWidth is the maximum width for the "Short" description before - // we force YAML to use multi-line syntax. The goal is to make the total - // width fit within 80 characters. This value is based on 80 characters - // minus the with of the field, colon, and whitespace (' description: '). 
- descriptionMaxWidth = 66 - ) - - flags.VisitAll(func(flag *pflag.Flag) { - opt = cmdOption{ - Option: flag.Name, - ValueType: flag.Value.Type(), - DefaultValue: forceMultiLine(flag.DefValue, defaultValueMaxWidth), - Description: forceMultiLine(flag.Usage, descriptionMaxWidth), - Deprecated: len(flag.Deprecated) > 0, - } - - if v, ok := flag.Annotations["docs.external.url"]; ok && len(v) > 0 { - opt.DetailsURL = strings.TrimPrefix(v[0], "https://docs.docker.com") - } else if _, ok = anchors[flag.Name]; ok { - opt.DetailsURL = "#" + flag.Name - } - - // Todo, when we mark a shorthand is deprecated, but specify an empty message. - // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. - // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. - if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { - opt.Shorthand = flag.Shorthand - } - if _, ok := flag.Annotations["experimental"]; ok { - opt.Experimental = true - } - if _, ok := flag.Annotations["deprecated"]; ok { - opt.Deprecated = true - } - if v, ok := flag.Annotations["version"]; ok { - opt.MinAPIVersion = v[0] - } - if _, ok := flag.Annotations["experimentalCLI"]; ok { - opt.ExperimentalCLI = true - } - if _, ok := flag.Annotations["kubernetes"]; ok { - opt.Kubernetes = true - } - if _, ok := flag.Annotations["swarm"]; ok { - opt.Swarm = true - } - - // Note that the annotation can have multiple ostypes set, however, multiple - // values are currently not used (and unlikely will). - // - // To simplify usage of the os_type property in the YAML, and for consistency - // with the same property for commands, we're only using the first ostype that's set. - if ostypes, ok := flag.Annotations["ostype"]; ok && len(opt.OSType) == 0 && len(ostypes) > 0 { - opt.OSType = ostypes[0] - } - - result = append(result, opt) - }) - - return result -} - -// forceMultiLine appends a newline (\n) to strings that are longer than max -// to force the yaml lib to use block notation (https://yaml.org/spec/1.2/spec.html#Block) -// instead of a single-line string with newlines and tabs encoded("string\nline1\nline2"). -// -// This makes the generated YAML more readable, and easier to review changes. -// max can be used to customize the width to keep the whole line < 80 chars. -func forceMultiLine(s string, max int) string { - s = strings.TrimSpace(s) - if len(s) > max && !strings.Contains(s, "\n") { - s = s + "\n" - } - return s -} - -// Small duplication for cobra utils -func hasSeeAlso(cmd *cobra.Command) bool { - if cmd.HasParent() { - return true - } - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - return true - } - return false -} - -// applyDescriptionAndExamples fills in cmd.Long and cmd.Example with the -// "Description" and "Examples" H2 sections in mdString (if present). 
-func applyDescriptionAndExamples(cmd *cobra.Command, mdString string) { - sections := getSections(mdString) - var ( - anchors []string - md string - ) - if sections["description"] != "" { - md, anchors = cleanupMarkDown(sections["description"]) - cmd.Long = md - anchors = append(anchors, md) - } - if sections["examples"] != "" { - md, anchors = cleanupMarkDown(sections["examples"]) - cmd.Example = md - anchors = append(anchors, md) - } - if len(anchors) > 0 { - if cmd.Annotations == nil { - cmd.Annotations = make(map[string]string) - } - cmd.Annotations["anchors"] = strings.Join(anchors, ",") - } -} - -type byName []*cobra.Command - -func (s byName) Len() int { return len(s) } -func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } diff --git a/man/.gitignore b/man/.gitignore new file mode 100644 index 0000000000..b4dd664caf --- /dev/null +++ b/man/.gitignore @@ -0,0 +1,3 @@ +/vendor +/man* +/go.sum diff --git a/man/README.md b/man/README.md index 82dac650f9..e67c0f0033 100644 --- a/man/README.md +++ b/man/README.md @@ -12,4 +12,6 @@ Cobra command or amend the Markdown files for legacy pages. From within the project root directory run: - make manpages +```shell +$ make -f docker.Makefile manpages +``` diff --git a/man/generate.go b/man/generate.go index ac58c382fa..4a75dcf082 100644 --- a/man/generate.go +++ b/man/generate.go @@ -1,3 +1,7 @@ +// This file is intended for use with "go run"; it isn't really part of the package. + +// +build manpages + package main import ( @@ -18,6 +22,8 @@ import ( const descriptionSourcePath = "man/src/" func generateManPages(opts *options) error { + log.SetFlags(0) + header := &doc.GenManHeader{ Title: "DOCKER", Section: "1", @@ -73,6 +79,7 @@ func loadLongDescription(cmd *cobra.Command, path string) error { continue } + log.Printf("INFO: %s found\n", fullpath) content, err := os.ReadFile(fullpath) if err != nil { return err diff --git a/man/go.mod b/man/go.mod new file mode 100644 index 0000000000..4615c7c9a5 --- /dev/null +++ b/man/go.mod @@ -0,0 +1,15 @@ +module github.com/docker/cli/man + +// dummy go.mod to avoid dealing with dependencies specific +// to manpages generation and not really part of the project. + +go 1.16 + +//require ( +// github.com/docker/cli v0.0.0+incompatible +// github.com/cpuguy83/go-md2man/v2 v2.0.1 +// github.com/spf13/cobra v1.2.1 +// github.com/spf13/pflag v1.0.5 +//) +// +//replace github.com/docker/cli v0.0.0+incompatible => ../ diff --git a/man/import.go b/man/import.go deleted file mode 100644 index d48127f985..0000000000 --- a/man/import.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build never - -package main - -// Not used, but required for generating other man pages. -// Import it here so that the package is included by go modules. 
-import _ "github.com/cpuguy83/go-md2man/v2" diff --git a/man/md2man-all.sh b/man/md2man-all.sh deleted file mode 100755 index 46c7b8f08e..0000000000 --- a/man/md2man-all.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -e - -# get into this script's directory -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -[ "$1" = '-q' ] || { - set -x - pwd -} - -for FILE in *.md; do - base="$(basename "$FILE")" - name="${base%.md}" - num="${name##*.}" - if [ -z "$num" -o "$name" = "$num" ]; then - # skip files that aren't of the format xxxx.N.md (like README.md) - continue - fi - mkdir -p "./man${num}" - go-md2man -in "$FILE" -out "./man${num}/${name}" -done diff --git a/man/tools.go b/man/tools.go new file mode 100644 index 0000000000..eca2bd01f4 --- /dev/null +++ b/man/tools.go @@ -0,0 +1,10 @@ +// +build tools + +package main + +import ( + _ "github.com/cpuguy83/go-md2man/v2" + _ "github.com/spf13/cobra" + _ "github.com/spf13/cobra/doc" + _ "github.com/spf13/pflag" +) diff --git a/scripts/docs/generate-authors.sh b/scripts/docs/generate-authors.sh index 620897d371..b76ef325eb 100755 --- a/scripts/docs/generate-authors.sh +++ b/scripts/docs/generate-authors.sh @@ -1,10 +1,8 @@ #!/usr/bin/env bash set -e -cd "$(dirname "$(readlink -f "${BASH_SOURCE[*]}")")/../.." - # see also ".mailmap" for how email addresses and names are deduplicated - +OUT="${1:-.}" { cat <<-'EOH' # This file lists all individuals having contributed content to the repository. @@ -12,4 +10,5 @@ cd "$(dirname "$(readlink -f "${BASH_SOURCE[*]}")")/../.." EOH echo git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf -} > AUTHORS +} > "$OUT/AUTHORS" +cat "$OUT/AUTHORS" diff --git a/scripts/docs/generate-man.sh b/scripts/docs/generate-man.sh index e312c87dd3..6afed3fa87 100755 --- a/scripts/docs/generate-man.sh +++ b/scripts/docs/generate-man.sh @@ -1,17 +1,50 @@ #!/usr/bin/env bash -# Generate man pages for docker/cli -set -eu -o pipefail -mkdir -p ./man/man1 +set -eu -if ! command -v go-md2man &> /dev/null; then - # yay, go install creates a binary named "v2" ¯\_(ツ)_/¯ - go build -o "/go/bin/go-md2man" ./vendor/github.com/cpuguy83/go-md2man/v2 -fi +: "${MD2MAN_VERSION=v2.0.1}" -# Generate man pages from cobra commands -go build -o /tmp/gen-manpages github.com/docker/cli/man -/tmp/gen-manpages --root "$(pwd)" --target "$(pwd)/man/man1" +export GO111MODULE=auto -# Generate legacy pages from markdown -./man/md2man-all.sh -q +function clean { + rm -rf "$buildir" +} + +buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX) +trap clean EXIT + +( + set -x + cp -r . "$buildir/" + cd "$buildir" + # init dummy go.mod + ./scripts/vendor init + # install go-md2man and copy man/tools.go in root folder + # to be able to fetch the required dependencies + go mod edit -modfile=vendor.mod -require=github.com/cpuguy83/go-md2man/v2@${MD2MAN_VERSION} + cp man/tools.go . + # update vendor + ./scripts/vendor update + # build gen-manpages + go build -mod=vendor -modfile=vendor.mod -tags manpages -o /tmp/gen-manpages ./man/generate.go + # build go-md2man + go build -mod=vendor -modfile=vendor.mod -o /tmp/go-md2man ./vendor/github.com/cpuguy83/go-md2man/v2 +) + +mkdir -p man/man1 +(set -x ; /tmp/gen-manpages --root "." 
--target "$(pwd)/man/man1") + +( + cd man + for FILE in *.md; do + base="$(basename "$FILE")" + name="${base%.md}" + num="${name##*.}" + if [ -z "$num" ] || [ "$name" = "$num" ]; then + # skip files that aren't of the format xxxx.N.md (like README.md) + continue + fi + mkdir -p "./man${num}" + (set -x ; /tmp/go-md2man -in "$FILE" -out "./man${num}/${name}") + done +) diff --git a/scripts/docs/generate-yaml.sh b/scripts/docs/generate-yaml.sh index 634876aa75..1a4cd8c0cb 100755 --- a/scripts/docs/generate-yaml.sh +++ b/scripts/docs/generate-yaml.sh @@ -1,8 +1,34 @@ #!/usr/bin/env bash -# Generate yaml for docker/cli reference docs -set -eu -o pipefail -mkdir -p docs/yaml/gen +set -eu -GO111MODULE=off go build -o build/yaml-docs-generator github.com/docker/cli/docs/yaml -build/yaml-docs-generator --root "$(pwd)" --target "$(pwd)/docs/yaml/gen" +: "${CLI_DOCS_TOOL_VERSION=v0.3.1}" + +export GO111MODULE=auto + +function clean { + rm -rf "$buildir" +} + +buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX) +trap clean EXIT + +( + set -x + cp -r . "$buildir/" + cd "$buildir" + # init dummy go.mod + ./scripts/vendor init + # install cli-docs-tool and copy docs/tools.go in root folder + # to be able to fetch the required depedencies + go mod edit -modfile=vendor.mod -require=github.com/docker/cli-docs-tool@${CLI_DOCS_TOOL_VERSION} + cp docs/tools.go . + # update vendor + ./scripts/vendor update + # build docsgen + go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate.go +) + +mkdir -p docs/yaml +set -x +/tmp/docsgen --source "$(pwd)/docs/reference/commandline" --target "$(pwd)/docs/yaml" diff --git a/scripts/vendor b/scripts/vendor index 83f51f628c..9041b3aa56 100755 --- a/scripts/vendor +++ b/scripts/vendor @@ -5,7 +5,7 @@ set -eu TYP=$1 usage() { - echo "usage: ./scripts/vendor " + echo "usage: ./scripts/vendor " exit 1 } @@ -13,12 +13,14 @@ if [ -z "$TYP" ]; then usage fi -# create dummy go.mod, see comment in vendor.mod -cat > go.mod < go.mod < 0 { - return blackfriday.GoToNext - } - if entering { - out(w, paraTag) - } else { - out(w, crTag) - } - case blackfriday.BlockQuote: - if entering { - out(w, quoteTag) - } else { - out(w, quoteCloseTag) - } - case blackfriday.Heading: - r.handleHeading(w, node, entering) - case blackfriday.HorizontalRule: - out(w, hruleTag) - case blackfriday.List: - r.handleList(w, node, entering) - case blackfriday.Item: - r.handleItem(w, node, entering) - case blackfriday.CodeBlock: - out(w, codeTag) - escapeSpecialChars(w, node.Literal) - out(w, codeCloseTag) - case blackfriday.Table: - r.handleTable(w, node, entering) - case blackfriday.TableHead: - case blackfriday.TableBody: - case blackfriday.TableRow: - // no action as cell entries do all the nroff formatting - return blackfriday.GoToNext - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) - case blackfriday.HTMLSpan: - // ignore other HTML tags - default: - fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) - } - return walkAction -} - -func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - switch node.Level { - case 1: - if !r.firstHeader { - out(w, titleHeader) - r.firstHeader = true - break - } - out(w, topLevelHeader) - case 2: - out(w, secondLevelHdr) - default: - out(w, otherHeader) - } - } -} - -func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { - openTag := listTag - closeTag := listCloseTag - if 
node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // tags for definition lists handled within Item node - openTag = "" - closeTag = "" - } - if entering { - r.listDepth++ - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - r.listCounters = append(r.listCounters, 1) - } - out(w, openTag) - } else { - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - r.listCounters = r.listCounters[:len(r.listCounters)-1] - } - out(w, closeTag) - r.listDepth-- - } -} - -func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) - r.listCounters[len(r.listCounters)-1]++ - } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { - // DT (definition term): line just before DD (see below). - out(w, dtTag) - r.firstDD = true - } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // DD (definition description): line that starts with ": ". - // - // We have to distinguish between the first DD and the - // subsequent ones, as there should be no vertical - // whitespace between the DT and the first DD. - if r.firstDD { - r.firstDD = false - } else { - out(w, dd2Tag) - } - } else { - out(w, ".IP \\(bu 2\n") - } - } else { - out(w, "\n") - } -} - -func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - out(w, tableStart) - // call walker to count cells (and rows?) so format section can be produced - columns := countColumns(node) - out(w, strings.Repeat("l ", columns)+"\n") - out(w, strings.Repeat("l ", columns)+".\n") - } else { - out(w, tableEnd) - } -} - -func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - var start string - if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { - start = "\t" - } - if node.IsHeader { - start += codespanTag - } else if nodeLiteralSize(node) > 30 { - start += tableCellStart - } - out(w, start) - } else { - var end string - if node.IsHeader { - end = codespanCloseTag - } else if nodeLiteralSize(node) > 30 { - end = tableCellEnd - } - if node.Next == nil && end != tableCellEnd { - // Last cell: need to carriage return if we are at the end of the - // header row and content isn't wrapped in a "tablecell" - end += crTag - } - out(w, end) - } -} - -func nodeLiteralSize(node *blackfriday.Node) int { - total := 0 - for n := node.FirstChild; n != nil; n = n.FirstChild { - total += len(n.Literal) - } - return total -} - -// because roff format requires knowing the column count before outputting any table -// data we need to walk a table tree and count the columns -func countColumns(node *blackfriday.Node) int { - var columns int - - node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - switch node.Type { - case blackfriday.TableRow: - if !entering { - return blackfriday.Terminate - } - case blackfriday.TableCell: - if entering { - columns++ - } - default: - } - return blackfriday.GoToNext - }) - return columns -} - -func out(w io.Writer, output string) { - io.WriteString(w, output) // nolint: errcheck -} - -func escapeSpecialChars(w io.Writer, text []byte) { - for i := 0; i < len(text); i++ { - // escape initial apostrophe or period - if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { - out(w, "\\&") - } - - // directly copy normal characters - org := i - - for i < len(text) && text[i] != '\\' { - i++ - } - if i > org { - 
w.Write(text[org:i]) // nolint: errcheck - } - - // escape a character - if i >= len(text) { - break - } - - w.Write([]byte{'\\', text[i]}) // nolint: errcheck - } -} diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore deleted file mode 100644 index 75623dcccb..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.out -*.swp -*.8 -*.6 -_obj -_test* -markdown -tags diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml deleted file mode 100644 index b0b525a5a8..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: false -language: go -go: - - "1.10.x" - - "1.11.x" - - tip -matrix: - fast_finish: true - allow_failures: - - go: tip -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v ./... diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt deleted file mode 100644 index 2885af3602..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt +++ /dev/null @@ -1,29 +0,0 @@ -Blackfriday is distributed under the Simplified BSD License: - -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md deleted file mode 100644 index d9c08a22fc..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/README.md +++ /dev/null @@ -1,335 +0,0 @@ -Blackfriday -[![Build Status][BuildV2SVG]][BuildV2URL] -[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] -=========== - -Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It -is paranoid about its input (so you can safely feed it user-supplied -data), it is fast, it supports common extensions (tables, smart -punctuation substitutions, etc.), and it is safe for all utf-8 -(unicode) input. 
- -HTML output is currently supported, along with Smartypants -extensions. - -It started as a translation from C of [Sundown][3]. - - -Installation ------------- - -Blackfriday is compatible with modern Go releases in module mode. -With Go installed: - - go get github.com/russross/blackfriday/v2 - -will resolve and add the package to the current development module, -then build and install it. Alternatively, you can achieve the same -if you import it in a package: - - import "github.com/russross/blackfriday/v2" - -and `go get` without parameters. - -Legacy GOPATH mode is unsupported. - - -Versions --------- - -Currently maintained and recommended version of Blackfriday is `v2`. It's being -developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the -documentation is available at -https://pkg.go.dev/github.com/russross/blackfriday/v2. - -It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. - -Version 2 offers a number of improvements over v1: - -* Cleaned up API -* A separate call to [`Parse`][4], which produces an abstract syntax tree for - the document -* Latest bug fixes -* Flexibility to easily add your own rendering extensions - -Potential drawbacks: - -* Our benchmarks show v2 to be slightly slower than v1. Currently in the - ballpark of around 15%. -* API breakage. If you can't afford modifying your code to adhere to the new API - and don't care too much about the new features, v2 is probably not for you. -* Several bug fixes are trailing behind and still need to be forward-ported to - v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for - tracking. - -If you are still interested in the legacy `v1`, you can import it from -`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found -here: https://pkg.go.dev/github.com/russross/blackfriday. - - -Usage ------ - -For the most sensible markdown processing, it is as simple as getting your input -into a byte slice and calling: - -```go -output := blackfriday.Run(input) -``` - -Your input will be parsed and the output rendered with a set of most popular -extensions enabled. If you want the most basic feature set, corresponding with -the bare Markdown specification, use: - -```go -output := blackfriday.Run(input, blackfriday.WithNoExtensions()) -``` - -### Sanitize untrusted content - -Blackfriday itself does nothing to protect against malicious content. If you are -dealing with user-supplied markdown, we recommend running Blackfriday's output -through HTML sanitizer such as [Bluemonday][5]. - -Here's an example of simple usage of Blackfriday together with Bluemonday: - -```go -import ( - "github.com/microcosm-cc/bluemonday" - "github.com/russross/blackfriday/v2" -) - -// ... -unsafe := blackfriday.Run(input) -html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) -``` - -### Custom options - -If you want to customize the set of options, use `blackfriday.WithExtensions`, -`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. - -### `blackfriday-tool` - -You can also check out `blackfriday-tool` for a more complete example -of how to use it. Download and install it using: - - go get github.com/russross/blackfriday-tool - -This is a simple command-line tool that allows you to process a -markdown file using a standalone program. 
You can also browse the -source directly on github if you are just looking for some example -code: - -* - -Note that if you have not already done so, installing -`blackfriday-tool` will be sufficient to download and install -blackfriday in addition to the tool itself. The tool binary will be -installed in `$GOPATH/bin`. This is a statically-linked binary that -can be copied to wherever you need it without worrying about -dependencies and library versions. - -### Sanitized anchor names - -Blackfriday includes an algorithm for creating sanitized anchor names -corresponding to a given input text. This algorithm is used to create -anchors for headings when `AutoHeadingIDs` extension is enabled. The -algorithm has a specification, so that other packages can create -compatible anchor names and links to those anchors. - -The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. - -[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to -create compatible links to the anchor names generated by blackfriday. -This algorithm is also implemented in a small standalone package at -[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients -that want a small package and don't need full functionality of blackfriday. - - -Features --------- - -All features of Sundown are supported, including: - -* **Compatibility**. The Markdown v1.0.3 test suite passes with - the `--tidy` option. Without `--tidy`, the differences are - mostly in whitespace and entity escaping, where blackfriday is - more consistent and cleaner. - -* **Common extensions**, including table support, fenced code - blocks, autolinks, strikethroughs, non-strict emphasis, etc. - -* **Safety**. Blackfriday is paranoid when parsing, making it safe - to feed untrusted user input without fear of bad things - happening. The test suite stress tests this and there are no - known inputs that make it crash. If you find one, please let me - know and send me the input that does it. - - NOTE: "safety" in this context means *runtime safety only*. In order to - protect yourself against JavaScript injection in untrusted content, see - [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). - -* **Fast processing**. It is fast enough to render on-demand in - most web applications without having to cache the output. - -* **Thread safety**. You can run multiple parsers in different - goroutines without ill effect. There is no dependence on global - shared state. - -* **Minimal dependencies**. Blackfriday only depends on standard - library packages in Go. The source code is pretty - self-contained, so it is easy to add to any project, including - Google App Engine projects. - -* **Standards compliant**. Output successfully validates using the - W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. - - -Extensions ----------- - -In addition to the standard markdown syntax, this package -implements the following extensions: - -* **Intra-word emphasis supression**. The `_` character is - commonly used inside words when discussing code, so having - markdown interpret it as an emphasis command is usually the - wrong thing. Blackfriday lets you treat all emphasis markers as - normal characters when they occur inside a word. - -* **Tables**. 
Tables can be created by drawing them in the input - using a simple syntax: - - ``` - Name | Age - --------|------ - Bob | 27 - Alice | 23 - ``` - -* **Fenced code blocks**. In addition to the normal 4-space - indentation to mark code blocks, you can explicitly mark them - and supply a language (to make syntax highlighting simple). Just - mark it like this: - - ```go - func getTrue() bool { - return true - } - ``` - - You can use 3 or more backticks to mark the beginning of the - block, and the same number to mark the end of the block. - - To preserve classes of fenced code blocks while using the bluemonday - HTML sanitizer, use the following policy: - - ```go - p := bluemonday.UGCPolicy() - p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") - html := p.SanitizeBytes(unsafe) - ``` - -* **Definition lists**. A simple definition list is made of a single-line - term followed by a colon and the definition for that term. - - Cat - : Fluffy animal everyone likes - - Internet - : Vector of transmission for pictures of cats - - Terms must be separated from the previous definition by a blank line. - -* **Footnotes**. A marker in the text that will become a superscript number; - a footnote definition that will be placed in a list of footnotes at the - end of the document. A footnote looks like this: - - This is a footnote.[^1] - - [^1]: the footnote text. - -* **Autolinking**. Blackfriday can find URLs that have not been - explicitly marked as links and turn them into links. - -* **Strikethrough**. Use two tildes (`~~`) to mark text that - should be crossed out. - -* **Hard line breaks**. With this extension enabled newlines in the input - translate into line breaks in the output. This extension is off by default. - -* **Smart quotes**. Smartypants-style punctuation substitution is - supported, turning normal double- and single-quote marks into - curly quotes, etc. - -* **LaTeX-style dash parsing** is an additional option, where `--` - is translated into `–`, and `---` is translated into - `—`. This differs from most smartypants processors, which - turn a single hyphen into an ndash and a double hyphen into an - mdash. - -* **Smart fractions**, where anything that looks like a fraction - is translated into suitable HTML (instead of just a few special - cases like most smartypant processors). For example, `4/5` - becomes `45`, which renders as - 45. - - -Other renderers ---------------- - -Blackfriday is structured to allow alternative rendering engines. Here -are a few of note: - -* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): - provides a GitHub Flavored Markdown renderer with fenced code block - highlighting, clickable heading anchor links. - - It's not customizable, and its goal is to produce HTML output - equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), - except the rendering is performed locally. - -* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, - but for markdown. - -* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): - renders output as LaTeX. - -* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience - integration with the [Chroma](https://github.com/alecthomas/chroma) code - highlighting library. 
bfchroma is only compatible with v2 of Blackfriday and - provides a drop-in renderer ready to use with Blackfriday, as well as - options and means for further customization. - -* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. - -* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style - - -TODO ----- - -* More unit testing -* Improve Unicode support. It does not understand all Unicode - rules (about what constitutes a letter, a punctuation symbol, - etc.), so it may fail to detect word boundaries correctly in - some instances. It is safe on all UTF-8 input. - - -License -------- - -[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) - - - [1]: https://daringfireball.net/projects/markdown/ "Markdown" - [2]: https://golang.org/ "Go Language" - [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" - [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - - [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 - [BuildV2URL]: https://travis-ci.org/russross/blackfriday - [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 - [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go deleted file mode 100644 index dcd61e6e35..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/block.go +++ /dev/null @@ -1,1612 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// Functions to parse block-level elements. -// - -package blackfriday - -import ( - "bytes" - "html" - "regexp" - "strings" - "unicode" -) - -const ( - charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" - escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" -) - -var ( - reBackslashOrAmp = regexp.MustCompile("[\\&]") - reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) -) - -// Parse block-level data. -// Note: this function and many that it calls assume that -// the input buffer ends with a newline. -func (p *Markdown) block(data []byte) { - // this is called recursively: enforce a maximum depth - if p.nesting >= p.maxNesting { - return - } - p.nesting++ - - // parse out one block-level construct at a time - for len(data) > 0 { - // prefixed heading: - // - // # Heading 1 - // ## Heading 2 - // ... - // ###### Heading 6 - if p.isPrefixHeading(data) { - data = data[p.prefixHeading(data):] - continue - } - - // block of preformatted HTML: - // - //
- // ... - //     </div>
- if data[0] == '<' { - if i := p.html(data, true); i > 0 { - data = data[i:] - continue - } - } - - // title block - // - // % stuff - // % more stuff - // % even more stuff - if p.extensions&Titleblock != 0 { - if data[0] == '%' { - if i := p.titleBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - } - - // blank lines. note: returns the # of bytes to skip - if i := p.isEmpty(data); i > 0 { - data = data[i:] - continue - } - - // indented code block: - // - // func max(a, b int) int { - // if a > b { - // return a - // } - // return b - // } - if p.codePrefix(data) > 0 { - data = data[p.code(data):] - continue - } - - // fenced code block: - // - // ``` go - // func fact(n int) int { - // if n <= 1 { - // return n - // } - // return n * fact(n-1) - // } - // ``` - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - - // horizontal rule: - // - // ------ - // or - // ****** - // or - // ______ - if p.isHRule(data) { - p.addBlock(HorizontalRule, nil) - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - } - data = data[i:] - continue - } - - // block quote: - // - // > A big quote I found somewhere - // > on the web - if p.quotePrefix(data) > 0 { - data = data[p.quote(data):] - continue - } - - // table: - // - // Name | Age | Phone - // ------|-----|--------- - // Bob | 31 | 555-1234 - // Alice | 27 | 555-4321 - if p.extensions&Tables != 0 { - if i := p.table(data); i > 0 { - data = data[i:] - continue - } - } - - // an itemized/unordered list: - // - // * Item 1 - // * Item 2 - // - // also works with + or - - if p.uliPrefix(data) > 0 { - data = data[p.list(data, 0):] - continue - } - - // a numbered/ordered list: - // - // 1. Item 1 - // 2. Item 2 - if p.oliPrefix(data) > 0 { - data = data[p.list(data, ListTypeOrdered):] - continue - } - - // definition lists: - // - // Term 1 - // : Definition a - // : Definition b - // - // Term 2 - // : Definition c - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(data) > 0 { - data = data[p.list(data, ListTypeDefinition):] - continue - } - } - - // anything else must look like a normal paragraph - // note: this finds underlined headings, too - data = data[p.paragraph(data):] - } - - p.nesting-- -} - -func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { - p.closeUnmatchedBlocks() - container := p.addChild(typ, 0) - container.content = content - return container -} - -func (p *Markdown) isPrefixHeading(data []byte) bool { - if data[0] != '#' { - return false - } - - if p.extensions&SpaceHeadings != 0 { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - if level == len(data) || data[level] != ' ' { - return false - } - } - return true -} - -func (p *Markdown) prefixHeading(data []byte) int { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - i := skipChar(data, level, ' ') - end := skipUntilChar(data, i, '\n') - skip := end - id := "" - if p.extensions&HeadingIDs != 0 { - j, k := 0, 0 - // find start/end of heading id - for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // extract heading id iff found - if j < end && k < end { - id = string(data[j+2 : k]) - end = j - skip = k + 1 - for end > 0 && data[end-1] == ' ' { - end-- - } - } - } - for end > 0 && data[end-1] == '#' { - if isBackslashEscaped(data, end-1) { - break - } - end-- - } - for end > 0 && data[end-1] == ' ' { - end-- - 
} - if end > i { - if id == "" && p.extensions&AutoHeadingIDs != 0 { - id = SanitizedAnchorName(string(data[i:end])) - } - block := p.addBlock(Heading, data[i:end]) - block.HeadingID = id - block.Level = level - } - return skip -} - -func (p *Markdown) isUnderlinedHeading(data []byte) int { - // test of level 1 heading - if data[0] == '=' { - i := skipChar(data, 1, '=') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 1 - } - return 0 - } - - // test of level 2 heading - if data[0] == '-' { - i := skipChar(data, 1, '-') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 2 - } - return 0 - } - - return 0 -} - -func (p *Markdown) titleBlock(data []byte, doRender bool) int { - if data[0] != '%' { - return 0 - } - splitData := bytes.Split(data, []byte("\n")) - var i int - for idx, b := range splitData { - if !bytes.HasPrefix(b, []byte("%")) { - i = idx // - 1 - break - } - } - - data = bytes.Join(splitData[0:i], []byte("\n")) - consumed := len(data) - data = bytes.TrimPrefix(data, []byte("% ")) - data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) - block := p.addBlock(Heading, data) - block.Level = 1 - block.IsTitleblock = true - - return consumed -} - -func (p *Markdown) html(data []byte, doRender bool) int { - var i, j int - - // identify the opening tag - if data[0] != '<' { - return 0 - } - curtag, tagfound := p.htmlFindTag(data[1:]) - - // handle special cases - if !tagfound { - // check for an HTML comment - if size := p.htmlComment(data, doRender); size > 0 { - return size - } - - // check for an
tag - if size := p.htmlHr(data, doRender); size > 0 { - return size - } - - // no special case recognized - return 0 - } - - // look for an unindented matching closing tag - // followed by a blank line - found := false - /* - closetag := []byte("\n") - j = len(curtag) + 1 - for !found { - // scan for a closing tag at the beginning of a line - if skip := bytes.Index(data[j:], closetag); skip >= 0 { - j += skip + len(closetag) - } else { - break - } - - // see if it is the only thing on the line - if skip := p.isEmpty(data[j:]); skip > 0 { - // see if it is followed by a blank line/eof - j += skip - if j >= len(data) { - found = true - i = j - } else { - if skip := p.isEmpty(data[j:]); skip > 0 { - j += skip - found = true - i = j - } - } - } - } - */ - - // if not found, try a second pass looking for indented match - // but not if tag is "ins" or "del" (following original Markdown.pl) - if !found && curtag != "ins" && curtag != "del" { - i = 1 - for i < len(data) { - i++ - for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { - i++ - } - - if i+2+len(curtag) >= len(data) { - break - } - - j = p.htmlFindEnd(curtag, data[i-1:]) - - if j > 0 { - i += j - 1 - found = true - break - } - } - } - - if !found { - return 0 - } - - // the end of the block has been found - if doRender { - // trim newlines - end := i - for end > 0 && data[end-1] == '\n' { - end-- - } - finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) - } - - return i -} - -func finalizeHTMLBlock(block *Node) { - block.Literal = block.content - block.content = nil -} - -// HTML comment, lax form -func (p *Markdown) htmlComment(data []byte, doRender bool) int { - i := p.inlineHTMLComment(data) - // needs to end with a blank line - if j := p.isEmpty(data[i:]); j > 0 { - size := i + j - if doRender { - // trim trailing newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - block := p.addBlock(HTMLBlock, data[:end]) - finalizeHTMLBlock(block) - } - return size - } - return 0 -} - -// HR, which is the only self-closing block tag considered -func (p *Markdown) htmlHr(data []byte, doRender bool) int { - if len(data) < 4 { - return 0 - } - if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { - return 0 - } - if data[3] != ' ' && data[3] != '/' && data[3] != '>' { - // not an
tag after all; at least not a valid one - return 0 - } - i := 3 - for i < len(data) && data[i] != '>' && data[i] != '\n' { - i++ - } - if i < len(data) && data[i] == '>' { - i++ - if j := p.isEmpty(data[i:]); j > 0 { - size := i + j - if doRender { - // trim newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) - } - return size - } - } - return 0 -} - -func (p *Markdown) htmlFindTag(data []byte) (string, bool) { - i := 0 - for i < len(data) && isalnum(data[i]) { - i++ - } - key := string(data[:i]) - if _, ok := blockTags[key]; ok { - return key, true - } - return "", false -} - -func (p *Markdown) htmlFindEnd(tag string, data []byte) int { - // assume data[0] == '<' && data[1] == '/' already tested - if tag == "hr" { - return 2 - } - // check if tag is a match - closetag := []byte("") - if !bytes.HasPrefix(data, closetag) { - return 0 - } - i := len(closetag) - - // check that the rest of the line is blank - skip := 0 - if skip = p.isEmpty(data[i:]); skip == 0 { - return 0 - } - i += skip - skip = 0 - - if i >= len(data) { - return i - } - - if p.extensions&LaxHTMLBlocks != 0 { - return i - } - if skip = p.isEmpty(data[i:]); skip == 0 { - // following line must be blank - return 0 - } - - return i + skip -} - -func (*Markdown) isEmpty(data []byte) int { - // it is okay to call isEmpty on an empty buffer - if len(data) == 0 { - return 0 - } - - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] != ' ' && data[i] != '\t' { - return 0 - } - } - if i < len(data) && data[i] == '\n' { - i++ - } - return i -} - -func (*Markdown) isHRule(data []byte) bool { - i := 0 - - // skip up to three spaces - for i < 3 && data[i] == ' ' { - i++ - } - - // look at the hrule char - if data[i] != '*' && data[i] != '-' && data[i] != '_' { - return false - } - c := data[i] - - // the whole line must be the char or whitespace - n := 0 - for i < len(data) && data[i] != '\n' { - switch { - case data[i] == c: - n++ - case data[i] != ' ': - return false - } - i++ - } - - return n >= 3 -} - -// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, -// and returns the end index if so, or 0 otherwise. It also returns the marker found. -// If info is not nil, it gets set to the syntax specified in the fence line. -func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) { - i, size := 0, 0 - - // skip up to three spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - - // check for the marker characters: ~ or ` - if i >= len(data) { - return 0, "" - } - if data[i] != '~' && data[i] != '`' { - return 0, "" - } - - c := data[i] - - // the whole line must be the same char or whitespace - for i < len(data) && data[i] == c { - size++ - i++ - } - - // the marker char must occur at least 3 times - if size < 3 { - return 0, "" - } - marker = string(data[i-size : i]) - - // if this is the end marker, it must match the beginning marker - if oldmarker != "" && marker != oldmarker { - return 0, "" - } - - // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here - // into one, always get the info string, and discard it if the caller doesn't care. 
- if info != nil { - infoLength := 0 - i = skipChar(data, i, ' ') - - if i >= len(data) { - if i == len(data) { - return i, marker - } - return 0, "" - } - - infoStart := i - - if data[i] == '{' { - i++ - infoStart++ - - for i < len(data) && data[i] != '}' && data[i] != '\n' { - infoLength++ - i++ - } - - if i >= len(data) || data[i] != '}' { - return 0, "" - } - - // strip all whitespace at the beginning and the end - // of the {} block - for infoLength > 0 && isspace(data[infoStart]) { - infoStart++ - infoLength-- - } - - for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { - infoLength-- - } - i++ - i = skipChar(data, i, ' ') - } else { - for i < len(data) && !isverticalspace(data[i]) { - infoLength++ - i++ - } - } - - *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) - } - - if i == len(data) { - return i, marker - } - if i > len(data) || data[i] != '\n' { - return 0, "" - } - return i + 1, marker // Take newline into account. -} - -// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, -// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. -// If doRender is true, a final newline is mandatory to recognize the fenced code block. -func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { - var info string - beg, marker := isFenceLine(data, &info, "") - if beg == 0 || beg >= len(data) { - return 0 - } - fenceLength := beg - 1 - - var work bytes.Buffer - work.Write([]byte(info)) - work.WriteByte('\n') - - for { - // safe to assume beg < len(data) - - // check for the end of the code block - fenceEnd, _ := isFenceLine(data[beg:], nil, marker) - if fenceEnd != 0 { - beg += fenceEnd - break - } - - // copy the current line - end := skipUntilChar(data, beg, '\n') + 1 - - // did we reach the end of the buffer without a closing marker? 
- if end >= len(data) { - return 0 - } - - // verbatim copy to the working buffer - if doRender { - work.Write(data[beg:end]) - } - beg = end - } - - if doRender { - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = true - block.FenceLength = fenceLength - finalizeCodeBlock(block) - } - - return beg -} - -func unescapeChar(str []byte) []byte { - if str[0] == '\\' { - return []byte{str[1]} - } - return []byte(html.UnescapeString(string(str))) -} - -func unescapeString(str []byte) []byte { - if reBackslashOrAmp.Match(str) { - return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) - } - return str -} - -func finalizeCodeBlock(block *Node) { - if block.IsFenced { - newlinePos := bytes.IndexByte(block.content, '\n') - firstLine := block.content[:newlinePos] - rest := block.content[newlinePos+1:] - block.Info = unescapeString(bytes.Trim(firstLine, "\n")) - block.Literal = rest - } else { - block.Literal = block.content - } - block.content = nil -} - -func (p *Markdown) table(data []byte) int { - table := p.addBlock(Table, nil) - i, columns := p.tableHeader(data) - if i == 0 { - p.tip = table.Parent - table.Unlink() - return 0 - } - - p.addBlock(TableBody, nil) - - for i < len(data) { - pipes, rowStart := 0, i - for ; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' { - pipes++ - } - } - - if pipes == 0 { - i = rowStart - break - } - - // include the newline in data sent to tableRow - if i < len(data) && data[i] == '\n' { - i++ - } - p.tableRow(data[rowStart:i], columns, false) - } - - return i -} - -// check if the specified position is preceded by an odd number of backslashes -func isBackslashEscaped(data []byte, i int) bool { - backslashes := 0 - for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { - backslashes++ - } - return backslashes&1 == 1 -} - -func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { - i := 0 - colCount := 1 - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - } - } - - // doesn't look like a table header - if colCount == 1 { - return - } - - // include the newline in the data sent to tableRow - j := i - if j < len(data) && data[j] == '\n' { - j++ - } - header := data[:j] - - // column count ignores pipes at beginning or end of line - if data[0] == '|' { - colCount-- - } - if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { - colCount-- - } - - columns = make([]CellAlignFlags, colCount) - - // move on to the header underline - i++ - if i >= len(data) { - return - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - i = skipChar(data, i, ' ') - - // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 - // and trailing | optional on last column - col := 0 - for i < len(data) && data[i] != '\n' { - dashes := 0 - - if data[i] == ':' { - i++ - columns[col] |= TableAlignmentLeft - dashes++ - } - for i < len(data) && data[i] == '-' { - i++ - dashes++ - } - if i < len(data) && data[i] == ':' { - i++ - columns[col] |= TableAlignmentRight - dashes++ - } - for i < len(data) && data[i] == ' ' { - i++ - } - if i == len(data) { - return - } - // end of column test is messy - switch { - case dashes < 3: - // not a valid column - return - - case data[i] == '|' && !isBackslashEscaped(data, i): - // marker found, now skip past trailing whitespace - col++ - i++ - for i < len(data) && data[i] == ' ' { - i++ - } - - // trailing junk found after last column - if col >= colCount && i < len(data) && data[i] != '\n' { - return - } - - case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: - // something else found where marker was required - return - - case data[i] == '\n': - // marker is optional for the last column - col++ - - default: - // trailing junk found after last column - return - } - } - if col != colCount { - return - } - - p.addBlock(TableHead, nil) - p.tableRow(header, columns, true) - size = i - if size < len(data) && data[size] == '\n' { - size++ - } - return -} - -func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { - p.addBlock(TableRow, nil) - i, col := 0, 0 - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - - for col = 0; col < len(columns) && i < len(data); col++ { - for i < len(data) && data[i] == ' ' { - i++ - } - - cellStart := i - - for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { - i++ - } - - cellEnd := i - - // skip the end-of-cell marker, possibly taking us past end of buffer - i++ - - for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { - cellEnd-- - } - - cell := p.addBlock(TableCell, data[cellStart:cellEnd]) - cell.IsHeader = header - cell.Align = columns[col] - } - - // pad it out with empty columns to get the right number - for ; col < len(columns); col++ { - cell := p.addBlock(TableCell, nil) - cell.IsHeader = header - cell.Align = columns[col] - } - - // silently ignore rows with too many cells -} - -// returns blockquote prefix length -func (p *Markdown) quotePrefix(data []byte) int { - i := 0 - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - if i < len(data) && data[i] == '>' { - if i+1 < len(data) && data[i+1] == ' ' { - return i + 2 - } - return i + 1 - } - return 0 -} - -// blockquote ends with at least one blank line -// followed by something without a blockquote prefix -func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { - if p.isEmpty(data[beg:]) <= 0 { - return false - } - if end >= len(data) { - return true - } - return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 -} - -// parse a blockquote fragment -func (p *Markdown) quote(data []byte) int { - block := p.addBlock(BlockQuote, nil) - var raw bytes.Buffer - beg, end := 0, 0 - for beg < len(data) { - end = beg - // Step over whole lines, collecting them. 
While doing that, check for - // fenced code and if one's found, incorporate it altogether, - // irregardless of any contents inside it - for end < len(data) && data[end] != '\n' { - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data[end:], false); i > 0 { - // -1 to compensate for the extra end++ after the loop: - end += i - 1 - break - } - } - end++ - } - if end < len(data) && data[end] == '\n' { - end++ - } - if pre := p.quotePrefix(data[beg:]); pre > 0 { - // skip the prefix - beg += pre - } else if p.terminateBlockquote(data, beg, end) { - break - } - // this line is part of the blockquote - raw.Write(data[beg:end]) - beg = end - } - p.block(raw.Bytes()) - p.finalize(block) - return end -} - -// returns prefix length for block code -func (p *Markdown) codePrefix(data []byte) int { - if len(data) >= 1 && data[0] == '\t' { - return 1 - } - if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { - return 4 - } - return 0 -} - -func (p *Markdown) code(data []byte) int { - var work bytes.Buffer - - i := 0 - for i < len(data) { - beg := i - for i < len(data) && data[i] != '\n' { - i++ - } - if i < len(data) && data[i] == '\n' { - i++ - } - - blankline := p.isEmpty(data[beg:i]) > 0 - if pre := p.codePrefix(data[beg:i]); pre > 0 { - beg += pre - } else if !blankline { - // non-empty, non-prefixed line breaks the pre - i = beg - break - } - - // verbatim copy to the working buffer - if blankline { - work.WriteByte('\n') - } else { - work.Write(data[beg:i]) - } - } - - // trim all the \n off the end of work - workbytes := work.Bytes() - eol := len(workbytes) - for eol > 0 && workbytes[eol-1] == '\n' { - eol-- - } - if eol != len(workbytes) { - work.Truncate(eol) - } - - work.WriteByte('\n') - - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = false - finalizeCodeBlock(block) - - return i -} - -// returns unordered list item prefix -func (p *Markdown) uliPrefix(data []byte) int { - i := 0 - // start with up to 3 spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - if i >= len(data)-1 { - return 0 - } - // need one of {'*', '+', '-'} followed by a space or a tab - if (data[i] != '*' && data[i] != '+' && data[i] != '-') || - (data[i+1] != ' ' && data[i+1] != '\t') { - return 0 - } - return i + 2 -} - -// returns ordered list item prefix -func (p *Markdown) oliPrefix(data []byte) int { - i := 0 - - // start with up to 3 spaces - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - - // count the digits - start := i - for i < len(data) && data[i] >= '0' && data[i] <= '9' { - i++ - } - if start == i || i >= len(data)-1 { - return 0 - } - - // we need >= 1 digits followed by a dot and a space or a tab - if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - return i + 2 -} - -// returns definition list item prefix -func (p *Markdown) dliPrefix(data []byte) int { - if len(data) < 2 { - return 0 - } - i := 0 - // need a ':' followed by a space or a tab - if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - for i < len(data) && data[i] == ' ' { - i++ - } - return i + 2 -} - -// parse ordered or unordered list block -func (p *Markdown) list(data []byte, flags ListType) int { - i := 0 - flags |= ListItemBeginningOfList - block := p.addBlock(List, nil) - block.ListFlags = flags - block.Tight = true - - for i < len(data) { - skip := p.listItem(data[i:], &flags) - if flags&ListItemContainsBlock != 0 { - block.ListData.Tight = false - } - i += skip - if skip == 0 || flags&ListItemEndOfList != 0 { - break - } - flags &= ^ListItemBeginningOfList - } - - above := block.Parent - finalizeList(block) - p.tip = above - return i -} - -// Returns true if the list item is not the same type as its parent list -func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { - if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { - return true - } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 { - return true - } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { - return true - } - return false -} - -// Returns true if block ends with a blank line, descending if needed -// into lists and sublists. -func endsWithBlankLine(block *Node) bool { - // TODO: figure this out. Always false now. - for block != nil { - //if block.lastLineBlank { - //return true - //} - t := block.Type - if t == List || t == Item { - block = block.LastChild - } else { - break - } - } - return false -} - -func finalizeList(block *Node) { - block.open = false - item := block.FirstChild - for item != nil { - // check for non-final list item ending with blank line: - if endsWithBlankLine(item) && item.Next != nil { - block.ListData.Tight = false - break - } - // recurse into children of list item, to see if there are spaces - // between any of them: - subItem := item.FirstChild - for subItem != nil { - if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { - block.ListData.Tight = false - break - } - subItem = subItem.Next - } - item = item.Next - } -} - -// Parse a single list item. -// Assumes initial prefix is already removed if this is a sublist. 
-func (p *Markdown) listItem(data []byte, flags *ListType) int { - // keep track of the indentation of the first line - itemIndent := 0 - if data[0] == '\t' { - itemIndent += 4 - } else { - for itemIndent < 3 && data[itemIndent] == ' ' { - itemIndent++ - } - } - - var bulletChar byte = '*' - i := p.uliPrefix(data) - if i == 0 { - i = p.oliPrefix(data) - } else { - bulletChar = data[i-2] - } - if i == 0 { - i = p.dliPrefix(data) - // reset definition term flag - if i > 0 { - *flags &= ^ListTypeTerm - } - } - if i == 0 { - // if in definition list, set term flag and continue - if *flags&ListTypeDefinition != 0 { - *flags |= ListTypeTerm - } else { - return 0 - } - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - // find the end of the line - line := i - for i > 0 && i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[line:i]) - line = i - - // process the following lines - containsBlankLine := false - sublist := 0 - codeBlockMarker := "" - -gatherlines: - for line < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[line:i]) > 0 { - containsBlankLine = true - line = i - continue - } - - // calculate the indentation - indent := 0 - indentIndex := 0 - if data[line] == '\t' { - indentIndex++ - indent += 4 - } else { - for indent < 4 && line+indent < i && data[line+indent] == ' ' { - indent++ - indentIndex++ - } - } - - chunk := data[line+indentIndex : i] - - if p.extensions&FencedCode != 0 { - // determine if in or out of codeblock - // if in codeblock, ignore normal list processing - _, marker := isFenceLine(chunk, nil, codeBlockMarker) - if marker != "" { - if codeBlockMarker == "" { - // start of codeblock - codeBlockMarker = marker - } else { - // end of codeblock. - codeBlockMarker = "" - } - } - // we are in a codeblock, write line, and continue - if codeBlockMarker != "" || marker != "" { - raw.Write(data[line+indentIndex : i]) - line = i - continue gatherlines - } - } - - // evaluate how this line fits in - switch { - // is this a nested list item? - case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || - p.oliPrefix(chunk) > 0 || - p.dliPrefix(chunk) > 0: - - // to be a nested list, it must be indented more - // if not, it is either a different kind of list - // or the next item in the same list - if indent <= itemIndent { - if p.listTypeChanged(chunk, flags) { - *flags |= ListItemEndOfList - } else if containsBlankLine { - *flags |= ListItemContainsBlock - } - - break gatherlines - } - - if containsBlankLine { - *flags |= ListItemContainsBlock - } - - // is this the first item in the nested list? - if sublist == 0 { - sublist = raw.Len() - } - - // is this a nested prefix heading? - case p.isPrefixHeading(chunk): - // if the heading is not indented, it is not nested in the list - // and thus ends the list - if containsBlankLine && indent < 4 { - *flags |= ListItemEndOfList - break gatherlines - } - *flags |= ListItemContainsBlock - - // anything following an empty line is only part - // of this item if it is indented 4 spaces - // (regardless of the indentation of the beginning of the item) - case containsBlankLine && indent < 4: - if *flags&ListTypeDefinition != 0 && i < len(data)-1 { - // is the next item still a part of this list? 
- next := i - for next < len(data) && data[next] != '\n' { - next++ - } - for next < len(data)-1 && data[next] == '\n' { - next++ - } - if i < len(data)-1 && data[i] != ':' && data[next] != ':' { - *flags |= ListItemEndOfList - } - } else { - *flags |= ListItemEndOfList - } - break gatherlines - - // a blank line means this should be parsed as a block - case containsBlankLine: - raw.WriteByte('\n') - *flags |= ListItemContainsBlock - } - - // if this line was preceded by one or more blanks, - // re-introduce the blank into the buffer - if containsBlankLine { - containsBlankLine = false - raw.WriteByte('\n') - } - - // add the line into the working buffer without prefix - raw.Write(data[line+indentIndex : i]) - - line = i - } - - rawBytes := raw.Bytes() - - block := p.addBlock(Item, nil) - block.ListFlags = *flags - block.Tight = false - block.BulletChar = bulletChar - block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark - - // render the contents of the list item - if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { - // intermediate render of block item, except for definition term - if sublist > 0 { - p.block(rawBytes[:sublist]) - p.block(rawBytes[sublist:]) - } else { - p.block(rawBytes) - } - } else { - // intermediate render of inline item - if sublist > 0 { - child := p.addChild(Paragraph, 0) - child.content = rawBytes[:sublist] - p.block(rawBytes[sublist:]) - } else { - child := p.addChild(Paragraph, 0) - child.content = rawBytes - } - } - return line -} - -// render a single paragraph that has already been parsed out -func (p *Markdown) renderParagraph(data []byte) { - if len(data) == 0 { - return - } - - // trim leading spaces - beg := 0 - for data[beg] == ' ' { - beg++ - } - - end := len(data) - // trim trailing newline - if data[len(data)-1] == '\n' { - end-- - } - - // trim trailing spaces - for end > beg && data[end-1] == ' ' { - end-- - } - - p.addBlock(Paragraph, data[beg:end]) -} - -func (p *Markdown) paragraph(data []byte) int { - // prev: index of 1st char of previous line - // line: index of 1st char of current line - // i: index of cursor/end of current line - var prev, line, i int - tabSize := TabSizeDefault - if p.extensions&TabSizeEight != 0 { - tabSize = TabSizeDouble - } - // keep going until we find something to mark the end of the paragraph - for i < len(data) { - // mark the beginning of the current line - prev = line - current := data[i:] - line = i - - // did we find a reference or a footnote? If so, end a paragraph - // preceding it and report that we have consumed up to the end of that - // reference: - if refEnd := isReference(p, current, tabSize); refEnd > 0 { - p.renderParagraph(data[:i]) - return i + refEnd - } - - // did we find a blank line marking the end of the paragraph? - if n := p.isEmpty(current); n > 0 { - // did this blank line followed by a definition list item? 
- if p.extensions&DefinitionLists != 0 { - if i < len(data)-1 && data[i+1] == ':' { - return p.list(data[prev:], ListTypeDefinition) - } - } - - p.renderParagraph(data[:i]) - return i + n - } - - // an underline under some text marks a heading, so our paragraph ended on prev line - if i > 0 { - if level := p.isUnderlinedHeading(current); level > 0 { - // render the paragraph - p.renderParagraph(data[:prev]) - - // ignore leading and trailing whitespace - eol := i - 1 - for prev < eol && data[prev] == ' ' { - prev++ - } - for eol > prev && data[eol-1] == ' ' { - eol-- - } - - id := "" - if p.extensions&AutoHeadingIDs != 0 { - id = SanitizedAnchorName(string(data[prev:eol])) - } - - block := p.addBlock(Heading, data[prev:eol]) - block.Level = level - block.HeadingID = id - - // find the end of the underline - for i < len(data) && data[i] != '\n' { - i++ - } - return i - } - } - - // if the next line starts a block of HTML, then the paragraph ends here - if p.extensions&LaxHTMLBlocks != 0 { - if data[i] == '<' && p.html(current, false) > 0 { - // rewind to before the HTML block - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a prefixed heading or a horizontal rule after this, paragraph is over - if p.isPrefixHeading(current) || p.isHRule(current) { - p.renderParagraph(data[:i]) - return i - } - - // if there's a fenced code block, paragraph is over - if p.extensions&FencedCode != 0 { - if p.fencedCodeBlock(current, false) > 0 { - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a definition list item, prev line is a definition term - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(current) != 0 { - ret := p.list(data[prev:], ListTypeDefinition) - return ret - } - } - - // if there's a list after this, paragraph is over - if p.extensions&NoEmptyLineBeforeBlock != 0 { - if p.uliPrefix(current) != 0 || - p.oliPrefix(current) != 0 || - p.quotePrefix(current) != 0 || - p.codePrefix(current) != 0 { - p.renderParagraph(data[:i]) - return i - } - } - - // otherwise, scan to the beginning of the next line - nl := bytes.IndexByte(data[i:], '\n') - if nl >= 0 { - i += nl + 1 - } else { - i += len(data[i:]) - } - } - - p.renderParagraph(data[:i]) - return i -} - -func skipChar(data []byte, start int, char byte) int { - i := start - for i < len(data) && data[i] == char { - i++ - } - return i -} - -func skipUntilChar(text []byte, start int, char byte) int { - i := start - for i < len(text) && text[i] != char { - i++ - } - return i -} - -// SanitizedAnchorName returns a sanitized anchor name for the given text. -// -// It implements the algorithm specified in the package comment. -func SanitizedAnchorName(text string) string { - var anchorName []rune - futureDash := false - for _, r := range text { - switch { - case unicode.IsLetter(r) || unicode.IsNumber(r): - if futureDash && len(anchorName) > 0 { - anchorName = append(anchorName, '-') - } - futureDash = false - anchorName = append(anchorName, unicode.ToLower(r)) - default: - futureDash = true - } - } - return string(anchorName) -} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go deleted file mode 100644 index 57ff152a05..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/doc.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package blackfriday is a markdown processor. 
-// -// It translates plain text with simple formatting rules into an AST, which can -// then be further processed to HTML (provided by Blackfriday itself) or other -// formats (provided by the community). -// -// The simplest way to invoke Blackfriday is to call the Run function. It will -// take a text input and produce a text output in HTML (or other format). -// -// A slightly more sophisticated way to use Blackfriday is to create a Markdown -// processor and to call Parse, which returns a syntax tree for the input -// document. You can leverage Blackfriday's parsing for content extraction from -// markdown documents. You can assign a custom renderer and set various options -// to the Markdown processor. -// -// If you're interested in calling Blackfriday from command line, see -// https://github.com/russross/blackfriday-tool. -// -// Sanitized Anchor Names -// -// Blackfriday includes an algorithm for creating sanitized anchor names -// corresponding to a given input text. This algorithm is used to create -// anchors for headings when AutoHeadingIDs extension is enabled. The -// algorithm is specified below, so that other packages can create -// compatible anchor names and links to those anchors. -// -// The algorithm iterates over the input text, interpreted as UTF-8, -// one Unicode code point (rune) at a time. All runes that are letters (category L) -// or numbers (category N) are considered valid characters. They are mapped to -// lower case, and included in the output. All other runes are considered -// invalid characters. Invalid characters that precede the first valid character, -// as well as invalid character that follow the last valid character -// are dropped completely. All other sequences of invalid characters -// between two valid characters are replaced with a single dash character '-'. -// -// SanitizedAnchorName exposes this functionality, and can be used to -// create compatible links to the anchor names generated by blackfriday. -// This algorithm is also implemented in a small standalone package at -// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients -// that want a small package and don't need full functionality of blackfriday. -package blackfriday - -// NOTE: Keep Sanitized Anchor Name algorithm in sync with package -// github.com/shurcooL/sanitized_anchor_name. -// Otherwise, users of sanitized_anchor_name will get anchor names -// that are incompatible with those generated by blackfriday. 
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go deleted file mode 100644 index a2c3edb691..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/entities.go +++ /dev/null @@ -1,2236 +0,0 @@ -package blackfriday - -// Extracted from https://html.spec.whatwg.org/multipage/entities.json -var entities = map[string]bool{ - "Æ": true, - "Æ": true, - "&": true, - "&": true, - "Á": true, - "Á": true, - "Ă": true, - "Â": true, - "Â": true, - "А": true, - "𝔄": true, - "À": true, - "À": true, - "Α": true, - "Ā": true, - "⩓": true, - "Ą": true, - "𝔸": true, - "⁡": true, - "Å": true, - "Å": true, - "𝒜": true, - "≔": true, - "Ã": true, - "Ã": true, - "Ä": true, - "Ä": true, - "∖": true, - "⫧": true, - "⌆": true, - "Б": true, - "∵": true, - "ℬ": true, - "Β": true, - "𝔅": true, - "𝔹": true, - "˘": true, - "ℬ": true, - "≎": true, - "Ч": true, - "©": true, - "©": true, - "Ć": true, - "⋒": true, - "ⅅ": true, - "ℭ": true, - "Č": true, - "Ç": true, - "Ç": true, - "Ĉ": true, - "∰": true, - "Ċ": true, - "¸": true, - "·": true, - "ℭ": true, - "Χ": true, - "⊙": true, - "⊖": true, - "⊕": true, - "⊗": true, - "∲": true, - "”": true, - "’": true, - "∷": true, - "⩴": true, - "≡": true, - "∯": true, - "∮": true, - "ℂ": true, - "∐": true, - "∳": true, - "⨯": true, - "𝒞": true, - "⋓": true, - "≍": true, - "ⅅ": true, - "⤑": true, - "Ђ": true, - "Ѕ": true, - "Џ": true, - "‡": true, - "↡": true, - "⫤": true, - "Ď": true, - "Д": true, - "∇": true, - "Δ": true, - "𝔇": true, - "´": true, - "˙": true, - "˝": true, - "`": true, - "˜": true, - "⋄": true, - "ⅆ": true, - "𝔻": true, - "¨": true, - "⃜": true, - "≐": true, - "∯": true, - "¨": true, - "⇓": true, - "⇐": true, - "⇔": true, - "⫤": true, - "⟸": true, - "⟺": true, - "⟹": true, - "⇒": true, - "⊨": true, - "⇑": true, - "⇕": true, - "∥": true, - "↓": true, - "⤓": true, - "⇵": true, - "̑": true, - "⥐": true, - "⥞": true, - "↽": true, - "⥖": true, - "⥟": true, - "⇁": true, - "⥗": true, - "⊤": true, - "↧": true, - "⇓": true, - "𝒟": true, - "Đ": true, - "Ŋ": true, - "Ð": true, - "Ð": true, - "É": true, - "É": true, - "Ě": true, - "Ê": true, - "Ê": true, - "Э": true, - "Ė": true, - "𝔈": true, - "È": true, - "È": true, - "∈": true, - "Ē": true, - "◻": true, - "▫": true, - "Ę": true, - "𝔼": true, - "Ε": true, - "⩵": true, - "≂": true, - "⇌": true, - "ℰ": true, - "⩳": true, - "Η": true, - "Ë": true, - "Ë": true, - "∃": true, - "ⅇ": true, - "Ф": true, - "𝔉": true, - "◼": true, - "▪": true, - "𝔽": true, - "∀": true, - "ℱ": true, - "ℱ": true, - "Ѓ": true, - ">": true, - ">": true, - "Γ": true, - "Ϝ": true, - "Ğ": true, - "Ģ": true, - "Ĝ": true, - "Г": true, - "Ġ": true, - "𝔊": true, - "⋙": true, - "𝔾": true, - "≥": true, - "⋛": true, - "≧": true, - "⪢": true, - "≷": true, - "⩾": true, - "≳": true, - "𝒢": true, - "≫": true, - "Ъ": true, - "ˇ": true, - "^": true, - "Ĥ": true, - "ℌ": true, - "ℋ": true, - "ℍ": true, - "─": true, - "ℋ": true, - "Ħ": true, - "≎": true, - "≏": true, - "Е": true, - "IJ": true, - "Ё": true, - "Í": true, - "Í": true, - "Î": true, - "Î": true, - "И": true, - "İ": true, - "ℑ": true, - "Ì": true, - "Ì": true, - "ℑ": true, - "Ī": true, - "ⅈ": true, - "⇒": true, - "∬": true, - "∫": true, - "⋂": true, - "⁣": true, - "⁢": true, - "Į": true, - "𝕀": true, - "Ι": true, - "ℐ": true, - "Ĩ": true, - "І": true, - "Ï": true, - "Ï": true, - "Ĵ": true, - "Й": true, - "𝔍": true, - "𝕁": true, - "𝒥": true, - "Ј": true, - "Є": true, - "Х": true, - "Ќ": true, - "Κ": true, - "Ķ": true, - "К": true, - "𝔎": 
true, - "𝕂": true, - "𝒦": true, - "Љ": true, - "<": true, - "<": true, - "Ĺ": true, - "Λ": true, - "⟪": true, - "ℒ": true, - "↞": true, - "Ľ": true, - "Ļ": true, - "Л": true, - "⟨": true, - "←": true, - "⇤": true, - "⇆": true, - "⌈": true, - "⟦": true, - "⥡": true, - "⇃": true, - "⥙": true, - "⌊": true, - "↔": true, - "⥎": true, - "⊣": true, - "↤": true, - "⥚": true, - "⊲": true, - "⧏": true, - "⊴": true, - "⥑": true, - "⥠": true, - "↿": true, - "⥘": true, - "↼": true, - "⥒": true, - "⇐": true, - "⇔": true, - "⋚": true, - "≦": true, - "≶": true, - "⪡": true, - "⩽": true, - "≲": true, - "𝔏": true, - "⋘": true, - "⇚": true, - "Ŀ": true, - "⟵": true, - "⟷": true, - "⟶": true, - "⟸": true, - "⟺": true, - "⟹": true, - "𝕃": true, - "↙": true, - "↘": true, - "ℒ": true, - "↰": true, - "Ł": true, - "≪": true, - "⤅": true, - "М": true, - " ": true, - "ℳ": true, - "𝔐": true, - "∓": true, - "𝕄": true, - "ℳ": true, - "Μ": true, - "Њ": true, - "Ń": true, - "Ň": true, - "Ņ": true, - "Н": true, - "​": true, - "​": true, - "​": true, - "​": true, - "≫": true, - "≪": true, - " ": true, - "𝔑": true, - "⁠": true, - " ": true, - "ℕ": true, - "⫬": true, - "≢": true, - "≭": true, - "∦": true, - "∉": true, - "≠": true, - "≂̸": true, - "∄": true, - "≯": true, - "≱": true, - "≧̸": true, - "≫̸": true, - "≹": true, - "⩾̸": true, - "≵": true, - "≎̸": true, - "≏̸": true, - "⋪": true, - "⧏̸": true, - "⋬": true, - "≮": true, - "≰": true, - "≸": true, - "≪̸": true, - "⩽̸": true, - "≴": true, - "⪢̸": true, - "⪡̸": true, - "⊀": true, - "⪯̸": true, - "⋠": true, - "∌": true, - "⋫": true, - "⧐̸": true, - "⋭": true, - "⊏̸": true, - "⋢": true, - "⊐̸": true, - "⋣": true, - "⊂⃒": true, - "⊈": true, - "⊁": true, - "⪰̸": true, - "⋡": true, - "≿̸": true, - "⊃⃒": true, - "⊉": true, - "≁": true, - "≄": true, - "≇": true, - "≉": true, - "∤": true, - "𝒩": true, - "Ñ": true, - "Ñ": true, - "Ν": true, - "Œ": true, - "Ó": true, - "Ó": true, - "Ô": true, - "Ô": true, - "О": true, - "Ő": true, - "𝔒": true, - "Ò": true, - "Ò": true, - "Ō": true, - "Ω": true, - "Ο": true, - "𝕆": true, - "“": true, - "‘": true, - "⩔": true, - "𝒪": true, - "Ø": true, - "Ø": true, - "Õ": true, - "Õ": true, - "⨷": true, - "Ö": true, - "Ö": true, - "‾": true, - "⏞": true, - "⎴": true, - "⏜": true, - "∂": true, - "П": true, - "𝔓": true, - "Φ": true, - "Π": true, - "±": true, - "ℌ": true, - "ℙ": true, - "⪻": true, - "≺": true, - "⪯": true, - "≼": true, - "≾": true, - "″": true, - "∏": true, - "∷": true, - "∝": true, - "𝒫": true, - "Ψ": true, - """: true, - """: true, - "𝔔": true, - "ℚ": true, - "𝒬": true, - "⤐": true, - "®": true, - "®": true, - "Ŕ": true, - "⟫": true, - "↠": true, - "⤖": true, - "Ř": true, - "Ŗ": true, - "Р": true, - "ℜ": true, - "∋": true, - "⇋": true, - "⥯": true, - "ℜ": true, - "Ρ": true, - "⟩": true, - "→": true, - "⇥": true, - "⇄": true, - "⌉": true, - "⟧": true, - "⥝": true, - "⇂": true, - "⥕": true, - "⌋": true, - "⊢": true, - "↦": true, - "⥛": true, - "⊳": true, - "⧐": true, - "⊵": true, - "⥏": true, - "⥜": true, - "↾": true, - "⥔": true, - "⇀": true, - "⥓": true, - "⇒": true, - "ℝ": true, - "⥰": true, - "⇛": true, - "ℛ": true, - "↱": true, - "⧴": true, - "Щ": true, - "Ш": true, - "Ь": true, - "Ś": true, - "⪼": true, - "Š": true, - "Ş": true, - "Ŝ": true, - "С": true, - "𝔖": true, - "↓": true, - "←": true, - "→": true, - "↑": true, - "Σ": true, - "∘": true, - "𝕊": true, - "√": true, - "□": true, - "⊓": true, - "⊏": true, - "⊑": true, - "⊐": true, - "⊒": true, - "⊔": true, - "𝒮": true, - "⋆": true, - "⋐": true, - "⋐": true, - "⊆": true, - "≻": 
true, - "⪰": true, - "≽": true, - "≿": true, - "∋": true, - "∑": true, - "⋑": true, - "⊃": true, - "⊇": true, - "⋑": true, - "Þ": true, - "Þ": true, - "™": true, - "Ћ": true, - "Ц": true, - " ": true, - "Τ": true, - "Ť": true, - "Ţ": true, - "Т": true, - "𝔗": true, - "∴": true, - "Θ": true, - "  ": true, - " ": true, - "∼": true, - "≃": true, - "≅": true, - "≈": true, - "𝕋": true, - "⃛": true, - "𝒯": true, - "Ŧ": true, - "Ú": true, - "Ú": true, - "↟": true, - "⥉": true, - "Ў": true, - "Ŭ": true, - "Û": true, - "Û": true, - "У": true, - "Ű": true, - "𝔘": true, - "Ù": true, - "Ù": true, - "Ū": true, - "_": true, - "⏟": true, - "⎵": true, - "⏝": true, - "⋃": true, - "⊎": true, - "Ų": true, - "𝕌": true, - "↑": true, - "⤒": true, - "⇅": true, - "↕": true, - "⥮": true, - "⊥": true, - "↥": true, - "⇑": true, - "⇕": true, - "↖": true, - "↗": true, - "ϒ": true, - "Υ": true, - "Ů": true, - "𝒰": true, - "Ũ": true, - "Ü": true, - "Ü": true, - "⊫": true, - "⫫": true, - "В": true, - "⊩": true, - "⫦": true, - "⋁": true, - "‖": true, - "‖": true, - "∣": true, - "|": true, - "❘": true, - "≀": true, - " ": true, - "𝔙": true, - "𝕍": true, - "𝒱": true, - "⊪": true, - "Ŵ": true, - "⋀": true, - "𝔚": true, - "𝕎": true, - "𝒲": true, - "𝔛": true, - "Ξ": true, - "𝕏": true, - "𝒳": true, - "Я": true, - "Ї": true, - "Ю": true, - "Ý": true, - "Ý": true, - "Ŷ": true, - "Ы": true, - "𝔜": true, - "𝕐": true, - "𝒴": true, - "Ÿ": true, - "Ж": true, - "Ź": true, - "Ž": true, - "З": true, - "Ż": true, - "​": true, - "Ζ": true, - "ℨ": true, - "ℤ": true, - "𝒵": true, - "á": true, - "á": true, - "ă": true, - "∾": true, - "∾̳": true, - "∿": true, - "â": true, - "â": true, - "´": true, - "´": true, - "а": true, - "æ": true, - "æ": true, - "⁡": true, - "𝔞": true, - "à": true, - "à": true, - "ℵ": true, - "ℵ": true, - "α": true, - "ā": true, - "⨿": true, - "&": true, - "&": true, - "∧": true, - "⩕": true, - "⩜": true, - "⩘": true, - "⩚": true, - "∠": true, - "⦤": true, - "∠": true, - "∡": true, - "⦨": true, - "⦩": true, - "⦪": true, - "⦫": true, - "⦬": true, - "⦭": true, - "⦮": true, - "⦯": true, - "∟": true, - "⊾": true, - "⦝": true, - "∢": true, - "Å": true, - "⍼": true, - "ą": true, - "𝕒": true, - "≈": true, - "⩰": true, - "⩯": true, - "≊": true, - "≋": true, - "'": true, - "≈": true, - "≊": true, - "å": true, - "å": true, - "𝒶": true, - "*": true, - "≈": true, - "≍": true, - "ã": true, - "ã": true, - "ä": true, - "ä": true, - "∳": true, - "⨑": true, - "⫭": true, - "≌": true, - "϶": true, - "‵": true, - "∽": true, - "⋍": true, - "⊽": true, - "⌅": true, - "⌅": true, - "⎵": true, - "⎶": true, - "≌": true, - "б": true, - "„": true, - "∵": true, - "∵": true, - "⦰": true, - "϶": true, - "ℬ": true, - "β": true, - "ℶ": true, - "≬": true, - "𝔟": true, - "⋂": true, - "◯": true, - "⋃": true, - "⨀": true, - "⨁": true, - "⨂": true, - "⨆": true, - "★": true, - "▽": true, - "△": true, - "⨄": true, - "⋁": true, - "⋀": true, - "⤍": true, - "⧫": true, - "▪": true, - "▴": true, - "▾": true, - "◂": true, - "▸": true, - "␣": true, - "▒": true, - "░": true, - "▓": true, - "█": true, - "=⃥": true, - "≡⃥": true, - "⌐": true, - "𝕓": true, - "⊥": true, - "⊥": true, - "⋈": true, - "╗": true, - "╔": true, - "╖": true, - "╓": true, - "═": true, - "╦": true, - "╩": true, - "╤": true, - "╧": true, - "╝": true, - "╚": true, - "╜": true, - "╙": true, - "║": true, - "╬": true, - "╣": true, - "╠": true, - "╫": true, - "╢": true, - "╟": true, - "⧉": true, - "╕": true, - "╒": true, - "┐": true, - "┌": true, - "─": true, - "╥": true, - "╨": true, - "┬": true, - "┴": 
true, - "⊟": true, - "⊞": true, - "⊠": true, - "╛": true, - "╘": true, - "┘": true, - "└": true, - "│": true, - "╪": true, - "╡": true, - "╞": true, - "┼": true, - "┤": true, - "├": true, - "‵": true, - "˘": true, - "¦": true, - "¦": true, - "𝒷": true, - "⁏": true, - "∽": true, - "⋍": true, - "\": true, - "⧅": true, - "⟈": true, - "•": true, - "•": true, - "≎": true, - "⪮": true, - "≏": true, - "≏": true, - "ć": true, - "∩": true, - "⩄": true, - "⩉": true, - "⩋": true, - "⩇": true, - "⩀": true, - "∩︀": true, - "⁁": true, - "ˇ": true, - "⩍": true, - "č": true, - "ç": true, - "ç": true, - "ĉ": true, - "⩌": true, - "⩐": true, - "ċ": true, - "¸": true, - "¸": true, - "⦲": true, - "¢": true, - "¢": true, - "·": true, - "𝔠": true, - "ч": true, - "✓": true, - "✓": true, - "χ": true, - "○": true, - "⧃": true, - "ˆ": true, - "≗": true, - "↺": true, - "↻": true, - "®": true, - "Ⓢ": true, - "⊛": true, - "⊚": true, - "⊝": true, - "≗": true, - "⨐": true, - "⫯": true, - "⧂": true, - "♣": true, - "♣": true, - ":": true, - "≔": true, - "≔": true, - ",": true, - "@": true, - "∁": true, - "∘": true, - "∁": true, - "ℂ": true, - "≅": true, - "⩭": true, - "∮": true, - "𝕔": true, - "∐": true, - "©": true, - "©": true, - "℗": true, - "↵": true, - "✗": true, - "𝒸": true, - "⫏": true, - "⫑": true, - "⫐": true, - "⫒": true, - "⋯": true, - "⤸": true, - "⤵": true, - "⋞": true, - "⋟": true, - "↶": true, - "⤽": true, - "∪": true, - "⩈": true, - "⩆": true, - "⩊": true, - "⊍": true, - "⩅": true, - "∪︀": true, - "↷": true, - "⤼": true, - "⋞": true, - "⋟": true, - "⋎": true, - "⋏": true, - "¤": true, - "¤": true, - "↶": true, - "↷": true, - "⋎": true, - "⋏": true, - "∲": true, - "∱": true, - "⌭": true, - "⇓": true, - "⥥": true, - "†": true, - "ℸ": true, - "↓": true, - "‐": true, - "⊣": true, - "⤏": true, - "˝": true, - "ď": true, - "д": true, - "ⅆ": true, - "‡": true, - "⇊": true, - "⩷": true, - "°": true, - "°": true, - "δ": true, - "⦱": true, - "⥿": true, - "𝔡": true, - "⇃": true, - "⇂": true, - "⋄": true, - "⋄": true, - "♦": true, - "♦": true, - "¨": true, - "ϝ": true, - "⋲": true, - "÷": true, - "÷": true, - "÷": true, - "⋇": true, - "⋇": true, - "ђ": true, - "⌞": true, - "⌍": true, - "$": true, - "𝕕": true, - "˙": true, - "≐": true, - "≑": true, - "∸": true, - "∔": true, - "⊡": true, - "⌆": true, - "↓": true, - "⇊": true, - "⇃": true, - "⇂": true, - "⤐": true, - "⌟": true, - "⌌": true, - "𝒹": true, - "ѕ": true, - "⧶": true, - "đ": true, - "⋱": true, - "▿": true, - "▾": true, - "⇵": true, - "⥯": true, - "⦦": true, - "џ": true, - "⟿": true, - "⩷": true, - "≑": true, - "é": true, - "é": true, - "⩮": true, - "ě": true, - "≖": true, - "ê": true, - "ê": true, - "≕": true, - "э": true, - "ė": true, - "ⅇ": true, - "≒": true, - "𝔢": true, - "⪚": true, - "è": true, - "è": true, - "⪖": true, - "⪘": true, - "⪙": true, - "⏧": true, - "ℓ": true, - "⪕": true, - "⪗": true, - "ē": true, - "∅": true, - "∅": true, - "∅": true, - " ": true, - " ": true, - " ": true, - "ŋ": true, - " ": true, - "ę": true, - "𝕖": true, - "⋕": true, - "⧣": true, - "⩱": true, - "ε": true, - "ε": true, - "ϵ": true, - "≖": true, - "≕": true, - "≂": true, - "⪖": true, - "⪕": true, - "=": true, - "≟": true, - "≡": true, - "⩸": true, - "⧥": true, - "≓": true, - "⥱": true, - "ℯ": true, - "≐": true, - "≂": true, - "η": true, - "ð": true, - "ð": true, - "ë": true, - "ë": true, - "€": true, - "!": true, - "∃": true, - "ℰ": true, - "ⅇ": true, - "≒": true, - "ф": true, - "♀": true, - "ffi": true, - "ff": true, - "ffl": true, - "𝔣": true, - "fi": true, - "fj": true, - 
"♭": true, - "fl": true, - "▱": true, - "ƒ": true, - "𝕗": true, - "∀": true, - "⋔": true, - "⫙": true, - "⨍": true, - "½": true, - "½": true, - "⅓": true, - "¼": true, - "¼": true, - "⅕": true, - "⅙": true, - "⅛": true, - "⅔": true, - "⅖": true, - "¾": true, - "¾": true, - "⅗": true, - "⅜": true, - "⅘": true, - "⅚": true, - "⅝": true, - "⅞": true, - "⁄": true, - "⌢": true, - "𝒻": true, - "≧": true, - "⪌": true, - "ǵ": true, - "γ": true, - "ϝ": true, - "⪆": true, - "ğ": true, - "ĝ": true, - "г": true, - "ġ": true, - "≥": true, - "⋛": true, - "≥": true, - "≧": true, - "⩾": true, - "⩾": true, - "⪩": true, - "⪀": true, - "⪂": true, - "⪄": true, - "⋛︀": true, - "⪔": true, - "𝔤": true, - "≫": true, - "⋙": true, - "ℷ": true, - "ѓ": true, - "≷": true, - "⪒": true, - "⪥": true, - "⪤": true, - "≩": true, - "⪊": true, - "⪊": true, - "⪈": true, - "⪈": true, - "≩": true, - "⋧": true, - "𝕘": true, - "`": true, - "ℊ": true, - "≳": true, - "⪎": true, - "⪐": true, - ">": true, - ">": true, - "⪧": true, - "⩺": true, - "⋗": true, - "⦕": true, - "⩼": true, - "⪆": true, - "⥸": true, - "⋗": true, - "⋛": true, - "⪌": true, - "≷": true, - "≳": true, - "≩︀": true, - "≩︀": true, - "⇔": true, - " ": true, - "½": true, - "ℋ": true, - "ъ": true, - "↔": true, - "⥈": true, - "↭": true, - "ℏ": true, - "ĥ": true, - "♥": true, - "♥": true, - "…": true, - "⊹": true, - "𝔥": true, - "⤥": true, - "⤦": true, - "⇿": true, - "∻": true, - "↩": true, - "↪": true, - "𝕙": true, - "―": true, - "𝒽": true, - "ℏ": true, - "ħ": true, - "⁃": true, - "‐": true, - "í": true, - "í": true, - "⁣": true, - "î": true, - "î": true, - "и": true, - "е": true, - "¡": true, - "¡": true, - "⇔": true, - "𝔦": true, - "ì": true, - "ì": true, - "ⅈ": true, - "⨌": true, - "∭": true, - "⧜": true, - "℩": true, - "ij": true, - "ī": true, - "ℑ": true, - "ℐ": true, - "ℑ": true, - "ı": true, - "⊷": true, - "Ƶ": true, - "∈": true, - "℅": true, - "∞": true, - "⧝": true, - "ı": true, - "∫": true, - "⊺": true, - "ℤ": true, - "⊺": true, - "⨗": true, - "⨼": true, - "ё": true, - "į": true, - "𝕚": true, - "ι": true, - "⨼": true, - "¿": true, - "¿": true, - "𝒾": true, - "∈": true, - "⋹": true, - "⋵": true, - "⋴": true, - "⋳": true, - "∈": true, - "⁢": true, - "ĩ": true, - "і": true, - "ï": true, - "ï": true, - "ĵ": true, - "й": true, - "𝔧": true, - "ȷ": true, - "𝕛": true, - "𝒿": true, - "ј": true, - "є": true, - "κ": true, - "ϰ": true, - "ķ": true, - "к": true, - "𝔨": true, - "ĸ": true, - "х": true, - "ќ": true, - "𝕜": true, - "𝓀": true, - "⇚": true, - "⇐": true, - "⤛": true, - "⤎": true, - "≦": true, - "⪋": true, - "⥢": true, - "ĺ": true, - "⦴": true, - "ℒ": true, - "λ": true, - "⟨": true, - "⦑": true, - "⟨": true, - "⪅": true, - "«": true, - "«": true, - "←": true, - "⇤": true, - "⤟": true, - "⤝": true, - "↩": true, - "↫": true, - "⤹": true, - "⥳": true, - "↢": true, - "⪫": true, - "⤙": true, - "⪭": true, - "⪭︀": true, - "⤌": true, - "❲": true, - "{": true, - "[": true, - "⦋": true, - "⦏": true, - "⦍": true, - "ľ": true, - "ļ": true, - "⌈": true, - "{": true, - "л": true, - "⤶": true, - "“": true, - "„": true, - "⥧": true, - "⥋": true, - "↲": true, - "≤": true, - "←": true, - "↢": true, - "↽": true, - "↼": true, - "⇇": true, - "↔": true, - "⇆": true, - "⇋": true, - "↭": true, - "⋋": true, - "⋚": true, - "≤": true, - "≦": true, - "⩽": true, - "⩽": true, - "⪨": true, - "⩿": true, - "⪁": true, - "⪃": true, - "⋚︀": true, - "⪓": true, - "⪅": true, - "⋖": true, - "⋚": true, - "⪋": true, - "≶": true, - "≲": true, - "⥼": true, - "⌊": true, - "𝔩": true, - "≶": true, - "⪑": true, 
- "↽": true, - "↼": true, - "⥪": true, - "▄": true, - "љ": true, - "≪": true, - "⇇": true, - "⌞": true, - "⥫": true, - "◺": true, - "ŀ": true, - "⎰": true, - "⎰": true, - "≨": true, - "⪉": true, - "⪉": true, - "⪇": true, - "⪇": true, - "≨": true, - "⋦": true, - "⟬": true, - "⇽": true, - "⟦": true, - "⟵": true, - "⟷": true, - "⟼": true, - "⟶": true, - "↫": true, - "↬": true, - "⦅": true, - "𝕝": true, - "⨭": true, - "⨴": true, - "∗": true, - "_": true, - "◊": true, - "◊": true, - "⧫": true, - "(": true, - "⦓": true, - "⇆": true, - "⌟": true, - "⇋": true, - "⥭": true, - "‎": true, - "⊿": true, - "‹": true, - "𝓁": true, - "↰": true, - "≲": true, - "⪍": true, - "⪏": true, - "[": true, - "‘": true, - "‚": true, - "ł": true, - "<": true, - "<": true, - "⪦": true, - "⩹": true, - "⋖": true, - "⋋": true, - "⋉": true, - "⥶": true, - "⩻": true, - "⦖": true, - "◃": true, - "⊴": true, - "◂": true, - "⥊": true, - "⥦": true, - "≨︀": true, - "≨︀": true, - "∺": true, - "¯": true, - "¯": true, - "♂": true, - "✠": true, - "✠": true, - "↦": true, - "↦": true, - "↧": true, - "↤": true, - "↥": true, - "▮": true, - "⨩": true, - "м": true, - "—": true, - "∡": true, - "𝔪": true, - "℧": true, - "µ": true, - "µ": true, - "∣": true, - "*": true, - "⫰": true, - "·": true, - "·": true, - "−": true, - "⊟": true, - "∸": true, - "⨪": true, - "⫛": true, - "…": true, - "∓": true, - "⊧": true, - "𝕞": true, - "∓": true, - "𝓂": true, - "∾": true, - "μ": true, - "⊸": true, - "⊸": true, - "⋙̸": true, - "≫⃒": true, - "≫̸": true, - "⇍": true, - "⇎": true, - "⋘̸": true, - "≪⃒": true, - "≪̸": true, - "⇏": true, - "⊯": true, - "⊮": true, - "∇": true, - "ń": true, - "∠⃒": true, - "≉": true, - "⩰̸": true, - "≋̸": true, - "ʼn": true, - "≉": true, - "♮": true, - "♮": true, - "ℕ": true, - " ": true, - " ": true, - "≎̸": true, - "≏̸": true, - "⩃": true, - "ň": true, - "ņ": true, - "≇": true, - "⩭̸": true, - "⩂": true, - "н": true, - "–": true, - "≠": true, - "⇗": true, - "⤤": true, - "↗": true, - "↗": true, - "≐̸": true, - "≢": true, - "⤨": true, - "≂̸": true, - "∄": true, - "∄": true, - "𝔫": true, - "≧̸": true, - "≱": true, - "≱": true, - "≧̸": true, - "⩾̸": true, - "⩾̸": true, - "≵": true, - "≯": true, - "≯": true, - "⇎": true, - "↮": true, - "⫲": true, - "∋": true, - "⋼": true, - "⋺": true, - "∋": true, - "њ": true, - "⇍": true, - "≦̸": true, - "↚": true, - "‥": true, - "≰": true, - "↚": true, - "↮": true, - "≰": true, - "≦̸": true, - "⩽̸": true, - "⩽̸": true, - "≮": true, - "≴": true, - "≮": true, - "⋪": true, - "⋬": true, - "∤": true, - "𝕟": true, - "¬": true, - "¬": true, - "∉": true, - "⋹̸": true, - "⋵̸": true, - "∉": true, - "⋷": true, - "⋶": true, - "∌": true, - "∌": true, - "⋾": true, - "⋽": true, - "∦": true, - "∦": true, - "⫽⃥": true, - "∂̸": true, - "⨔": true, - "⊀": true, - "⋠": true, - "⪯̸": true, - "⊀": true, - "⪯̸": true, - "⇏": true, - "↛": true, - "⤳̸": true, - "↝̸": true, - "↛": true, - "⋫": true, - "⋭": true, - "⊁": true, - "⋡": true, - "⪰̸": true, - "𝓃": true, - "∤": true, - "∦": true, - "≁": true, - "≄": true, - "≄": true, - "∤": true, - "∦": true, - "⋢": true, - "⋣": true, - "⊄": true, - "⫅̸": true, - "⊈": true, - "⊂⃒": true, - "⊈": true, - "⫅̸": true, - "⊁": true, - "⪰̸": true, - "⊅": true, - "⫆̸": true, - "⊉": true, - "⊃⃒": true, - "⊉": true, - "⫆̸": true, - "≹": true, - "ñ": true, - "ñ": true, - "≸": true, - "⋪": true, - "⋬": true, - "⋫": true, - "⋭": true, - "ν": true, - "#": true, - "№": true, - " ": true, - "⊭": true, - "⤄": true, - "≍⃒": true, - "⊬": true, - "≥⃒": true, - ">⃒": true, - "⧞": true, - "⤂": true, 
- "≤⃒": true, - "<⃒": true, - "⊴⃒": true, - "⤃": true, - "⊵⃒": true, - "∼⃒": true, - "⇖": true, - "⤣": true, - "↖": true, - "↖": true, - "⤧": true, - "Ⓢ": true, - "ó": true, - "ó": true, - "⊛": true, - "⊚": true, - "ô": true, - "ô": true, - "о": true, - "⊝": true, - "ő": true, - "⨸": true, - "⊙": true, - "⦼": true, - "œ": true, - "⦿": true, - "𝔬": true, - "˛": true, - "ò": true, - "ò": true, - "⧁": true, - "⦵": true, - "Ω": true, - "∮": true, - "↺": true, - "⦾": true, - "⦻": true, - "‾": true, - "⧀": true, - "ō": true, - "ω": true, - "ο": true, - "⦶": true, - "⊖": true, - "𝕠": true, - "⦷": true, - "⦹": true, - "⊕": true, - "∨": true, - "↻": true, - "⩝": true, - "ℴ": true, - "ℴ": true, - "ª": true, - "ª": true, - "º": true, - "º": true, - "⊶": true, - "⩖": true, - "⩗": true, - "⩛": true, - "ℴ": true, - "ø": true, - "ø": true, - "⊘": true, - "õ": true, - "õ": true, - "⊗": true, - "⨶": true, - "ö": true, - "ö": true, - "⌽": true, - "∥": true, - "¶": true, - "¶": true, - "∥": true, - "⫳": true, - "⫽": true, - "∂": true, - "п": true, - "%": true, - ".": true, - "‰": true, - "⊥": true, - "‱": true, - "𝔭": true, - "φ": true, - "ϕ": true, - "ℳ": true, - "☎": true, - "π": true, - "⋔": true, - "ϖ": true, - "ℏ": true, - "ℎ": true, - "ℏ": true, - "+": true, - "⨣": true, - "⊞": true, - "⨢": true, - "∔": true, - "⨥": true, - "⩲": true, - "±": true, - "±": true, - "⨦": true, - "⨧": true, - "±": true, - "⨕": true, - "𝕡": true, - "£": true, - "£": true, - "≺": true, - "⪳": true, - "⪷": true, - "≼": true, - "⪯": true, - "≺": true, - "⪷": true, - "≼": true, - "⪯": true, - "⪹": true, - "⪵": true, - "⋨": true, - "≾": true, - "′": true, - "ℙ": true, - "⪵": true, - "⪹": true, - "⋨": true, - "∏": true, - "⌮": true, - "⌒": true, - "⌓": true, - "∝": true, - "∝": true, - "≾": true, - "⊰": true, - "𝓅": true, - "ψ": true, - " ": true, - "𝔮": true, - "⨌": true, - "𝕢": true, - "⁗": true, - "𝓆": true, - "ℍ": true, - "⨖": true, - "?": true, - "≟": true, - """: true, - """: true, - "⇛": true, - "⇒": true, - "⤜": true, - "⤏": true, - "⥤": true, - "∽̱": true, - "ŕ": true, - "√": true, - "⦳": true, - "⟩": true, - "⦒": true, - "⦥": true, - "⟩": true, - "»": true, - "»": true, - "→": true, - "⥵": true, - "⇥": true, - "⤠": true, - "⤳": true, - "⤞": true, - "↪": true, - "↬": true, - "⥅": true, - "⥴": true, - "↣": true, - "↝": true, - "⤚": true, - "∶": true, - "ℚ": true, - "⤍": true, - "❳": true, - "}": true, - "]": true, - "⦌": true, - "⦎": true, - "⦐": true, - "ř": true, - "ŗ": true, - "⌉": true, - "}": true, - "р": true, - "⤷": true, - "⥩": true, - "”": true, - "”": true, - "↳": true, - "ℜ": true, - "ℛ": true, - "ℜ": true, - "ℝ": true, - "▭": true, - "®": true, - "®": true, - "⥽": true, - "⌋": true, - "𝔯": true, - "⇁": true, - "⇀": true, - "⥬": true, - "ρ": true, - "ϱ": true, - "→": true, - "↣": true, - "⇁": true, - "⇀": true, - "⇄": true, - "⇌": true, - "⇉": true, - "↝": true, - "⋌": true, - "˚": true, - "≓": true, - "⇄": true, - "⇌": true, - "‏": true, - "⎱": true, - "⎱": true, - "⫮": true, - "⟭": true, - "⇾": true, - "⟧": true, - "⦆": true, - "𝕣": true, - "⨮": true, - "⨵": true, - ")": true, - "⦔": true, - "⨒": true, - "⇉": true, - "›": true, - "𝓇": true, - "↱": true, - "]": true, - "’": true, - "’": true, - "⋌": true, - "⋊": true, - "▹": true, - "⊵": true, - "▸": true, - "⧎": true, - "⥨": true, - "℞": true, - "ś": true, - "‚": true, - "≻": true, - "⪴": true, - "⪸": true, - "š": true, - "≽": true, - "⪰": true, - "ş": true, - "ŝ": true, - "⪶": true, - "⪺": true, - "⋩": true, - "⨓": true, - "≿": true, - "с": true, - "⋅": true, 
- "⊡": true, - "⩦": true, - "⇘": true, - "⤥": true, - "↘": true, - "↘": true, - "§": true, - "§": true, - ";": true, - "⤩": true, - "∖": true, - "∖": true, - "✶": true, - "𝔰": true, - "⌢": true, - "♯": true, - "щ": true, - "ш": true, - "∣": true, - "∥": true, - "­": true, - "­": true, - "σ": true, - "ς": true, - "ς": true, - "∼": true, - "⩪": true, - "≃": true, - "≃": true, - "⪞": true, - "⪠": true, - "⪝": true, - "⪟": true, - "≆": true, - "⨤": true, - "⥲": true, - "←": true, - "∖": true, - "⨳": true, - "⧤": true, - "∣": true, - "⌣": true, - "⪪": true, - "⪬": true, - "⪬︀": true, - "ь": true, - "/": true, - "⧄": true, - "⌿": true, - "𝕤": true, - "♠": true, - "♠": true, - "∥": true, - "⊓": true, - "⊓︀": true, - "⊔": true, - "⊔︀": true, - "⊏": true, - "⊑": true, - "⊏": true, - "⊑": true, - "⊐": true, - "⊒": true, - "⊐": true, - "⊒": true, - "□": true, - "□": true, - "▪": true, - "▪": true, - "→": true, - "𝓈": true, - "∖": true, - "⌣": true, - "⋆": true, - "☆": true, - "★": true, - "ϵ": true, - "ϕ": true, - "¯": true, - "⊂": true, - "⫅": true, - "⪽": true, - "⊆": true, - "⫃": true, - "⫁": true, - "⫋": true, - "⊊": true, - "⪿": true, - "⥹": true, - "⊂": true, - "⊆": true, - "⫅": true, - "⊊": true, - "⫋": true, - "⫇": true, - "⫕": true, - "⫓": true, - "≻": true, - "⪸": true, - "≽": true, - "⪰": true, - "⪺": true, - "⪶": true, - "⋩": true, - "≿": true, - "∑": true, - "♪": true, - "¹": true, - "¹": true, - "²": true, - "²": true, - "³": true, - "³": true, - "⊃": true, - "⫆": true, - "⪾": true, - "⫘": true, - "⊇": true, - "⫄": true, - "⟉": true, - "⫗": true, - "⥻": true, - "⫂": true, - "⫌": true, - "⊋": true, - "⫀": true, - "⊃": true, - "⊇": true, - "⫆": true, - "⊋": true, - "⫌": true, - "⫈": true, - "⫔": true, - "⫖": true, - "⇙": true, - "⤦": true, - "↙": true, - "↙": true, - "⤪": true, - "ß": true, - "ß": true, - "⌖": true, - "τ": true, - "⎴": true, - "ť": true, - "ţ": true, - "т": true, - "⃛": true, - "⌕": true, - "𝔱": true, - "∴": true, - "∴": true, - "θ": true, - "ϑ": true, - "ϑ": true, - "≈": true, - "∼": true, - " ": true, - "≈": true, - "∼": true, - "þ": true, - "þ": true, - "˜": true, - "×": true, - "×": true, - "⊠": true, - "⨱": true, - "⨰": true, - "∭": true, - "⤨": true, - "⊤": true, - "⌶": true, - "⫱": true, - "𝕥": true, - "⫚": true, - "⤩": true, - "‴": true, - "™": true, - "▵": true, - "▿": true, - "◃": true, - "⊴": true, - "≜": true, - "▹": true, - "⊵": true, - "◬": true, - "≜": true, - "⨺": true, - "⨹": true, - "⧍": true, - "⨻": true, - "⏢": true, - "𝓉": true, - "ц": true, - "ћ": true, - "ŧ": true, - "≬": true, - "↞": true, - "↠": true, - "⇑": true, - "⥣": true, - "ú": true, - "ú": true, - "↑": true, - "ў": true, - "ŭ": true, - "û": true, - "û": true, - "у": true, - "⇅": true, - "ű": true, - "⥮": true, - "⥾": true, - "𝔲": true, - "ù": true, - "ù": true, - "↿": true, - "↾": true, - "▀": true, - "⌜": true, - "⌜": true, - "⌏": true, - "◸": true, - "ū": true, - "¨": true, - "¨": true, - "ų": true, - "𝕦": true, - "↑": true, - "↕": true, - "↿": true, - "↾": true, - "⊎": true, - "υ": true, - "ϒ": true, - "υ": true, - "⇈": true, - "⌝": true, - "⌝": true, - "⌎": true, - "ů": true, - "◹": true, - "𝓊": true, - "⋰": true, - "ũ": true, - "▵": true, - "▴": true, - "⇈": true, - "ü": true, - "ü": true, - "⦧": true, - "⇕": true, - "⫨": true, - "⫩": true, - "⊨": true, - "⦜": true, - "ϵ": true, - "ϰ": true, - "∅": true, - "ϕ": true, - "ϖ": true, - "∝": true, - "↕": true, - "ϱ": true, - "ς": true, - "⊊︀": true, - "⫋︀": true, - "⊋︀": true, - "⫌︀": true, - "ϑ": true, - "⊲": true, - "⊳": true, - "в": 
true, - "⊢": true, - "∨": true, - "⊻": true, - "≚": true, - "⋮": true, - "|": true, - "|": true, - "𝔳": true, - "⊲": true, - "⊂⃒": true, - "⊃⃒": true, - "𝕧": true, - "∝": true, - "⊳": true, - "𝓋": true, - "⫋︀": true, - "⊊︀": true, - "⫌︀": true, - "⊋︀": true, - "⦚": true, - "ŵ": true, - "⩟": true, - "∧": true, - "≙": true, - "℘": true, - "𝔴": true, - "𝕨": true, - "℘": true, - "≀": true, - "≀": true, - "𝓌": true, - "⋂": true, - "◯": true, - "⋃": true, - "▽": true, - "𝔵": true, - "⟺": true, - "⟷": true, - "ξ": true, - "⟸": true, - "⟵": true, - "⟼": true, - "⋻": true, - "⨀": true, - "𝕩": true, - "⨁": true, - "⨂": true, - "⟹": true, - "⟶": true, - "𝓍": true, - "⨆": true, - "⨄": true, - "△": true, - "⋁": true, - "⋀": true, - "ý": true, - "ý": true, - "я": true, - "ŷ": true, - "ы": true, - "¥": true, - "¥": true, - "𝔶": true, - "ї": true, - "𝕪": true, - "𝓎": true, - "ю": true, - "ÿ": true, - "ÿ": true, - "ź": true, - "ž": true, - "з": true, - "ż": true, - "ℨ": true, - "ζ": true, - "𝔷": true, - "ж": true, - "⇝": true, - "𝕫": true, - "𝓏": true, - "‍": true, - "‌": true, -} diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go deleted file mode 100644 index 6ab60102c9..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/esc.go +++ /dev/null @@ -1,70 +0,0 @@ -package blackfriday - -import ( - "html" - "io" -) - -var htmlEscaper = [256][]byte{ - '&': []byte("&"), - '<': []byte("<"), - '>': []byte(">"), - '"': []byte("""), -} - -func escapeHTML(w io.Writer, s []byte) { - escapeEntities(w, s, false) -} - -func escapeAllHTML(w io.Writer, s []byte) { - escapeEntities(w, s, true) -} - -func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) { - var start, end int - for end < len(s) { - escSeq := htmlEscaper[s[end]] - if escSeq != nil { - isEntity, entityEnd := nodeIsEntity(s, end) - if isEntity && !escapeValidEntities { - w.Write(s[start : entityEnd+1]) - start = entityEnd + 1 - } else { - w.Write(s[start:end]) - w.Write(escSeq) - start = end + 1 - } - } - end++ - } - if start < len(s) && end <= len(s) { - w.Write(s[start:end]) - } -} - -func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) { - isEntity = false - endEntityPos = end + 1 - - if s[end] == '&' { - for endEntityPos < len(s) { - if s[endEntityPos] == ';' { - if entities[string(s[end:endEntityPos+1])] { - isEntity = true - break - } - } - if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' { - break - } - endEntityPos++ - } - } - - return isEntity, endEntityPos -} - -func escLink(w io.Writer, text []byte) { - unesc := html.UnescapeString(string(text)) - escapeHTML(w, []byte(unesc)) -} diff --git a/vendor/github.com/russross/blackfriday/v2/go.mod b/vendor/github.com/russross/blackfriday/v2/go.mod deleted file mode 100644 index 620b74e0ac..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go deleted file mode 100644 index cb4f26e30f..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/html.go +++ /dev/null @@ -1,952 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. 
-// - -// -// -// HTML rendering backend -// -// - -package blackfriday - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -// HTMLFlags control optional behavior of HTML renderer. -type HTMLFlags int - -// HTML renderer configuration options. -const ( - HTMLFlagsNone HTMLFlags = 0 - SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks - SkipImages // Skip embedded images - SkipLinks // Skip all links - Safelink // Only link to trusted protocols - NofollowLinks // Only link with rel="nofollow" - NoreferrerLinks // Only link with rel="noreferrer" - NoopenerLinks // Only link with rel="noopener" - HrefTargetBlank // Add a blank target - CompletePage // Generate a complete HTML page - UseXHTML // Generate XHTML output instead of HTML - FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source - Smartypants // Enable smart punctuation substitutions - SmartypantsFractions // Enable smart fractions (with Smartypants) - SmartypantsDashes // Enable smart dashes (with Smartypants) - SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants) - SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering - SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants) - TOC // Generate a table of contents -) - -var ( - htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag) -) - -const ( - htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" + - processingInstruction + "|" + declaration + "|" + cdata + ")" - closeTag = "]" - openTag = "<" + tagName + attribute + "*" + "\\s*/?>" - attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)" - attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")" - attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")" - attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*" - cdata = "" - declaration = "]*>" - doubleQuotedValue = "\"[^\"]*\"" - htmlComment = "|" - processingInstruction = "[<][?].*?[?][>]" - singleQuotedValue = "'[^']*'" - tagName = "[A-Za-z][A-Za-z0-9-]*" - unquotedValue = "[^\"'=<>`\\x00-\\x20]+" -) - -// HTMLRendererParameters is a collection of supplementary parameters tweaking -// the behavior of various parts of HTML renderer. -type HTMLRendererParameters struct { - // Prepend this text to each relative URL. - AbsolutePrefix string - // Add this text to each footnote anchor, to ensure uniqueness. - FootnoteAnchorPrefix string - // Show this text inside the tag for a footnote return link, if the - // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string - // [return] is used. - FootnoteReturnLinkContents string - // If set, add this text to the front of each Heading ID, to ensure - // uniqueness. - HeadingIDPrefix string - // If set, add this text to the back of each Heading ID, to ensure uniqueness. - HeadingIDSuffix string - // Increase heading levels: if the offset is 1,

<h1> becomes <h2>
etc. - // Negative offset is also valid. - // Resulting levels are clipped between 1 and 6. - HeadingLevelOffset int - - Title string // Document title (used if CompletePage is set) - CSS string // Optional CSS file URL (used if CompletePage is set) - Icon string // Optional icon file URL (used if CompletePage is set) - - Flags HTMLFlags // Flags allow customizing this renderer's behavior -} - -// HTMLRenderer is a type that implements the Renderer interface for HTML output. -// -// Do not create this directly, instead use the NewHTMLRenderer function. -type HTMLRenderer struct { - HTMLRendererParameters - - closeTag string // how to end singleton tags: either " />" or ">" - - // Track heading IDs to prevent ID collision in a single generation. - headingIDs map[string]int - - lastOutputLen int - disableTags int - - sr *SPRenderer -} - -const ( - xhtmlClose = " />" - htmlClose = ">" -) - -// NewHTMLRenderer creates and configures an HTMLRenderer object, which -// satisfies the Renderer interface. -func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { - // configure the rendering engine - closeTag := htmlClose - if params.Flags&UseXHTML != 0 { - closeTag = xhtmlClose - } - - if params.FootnoteReturnLinkContents == "" { - // U+FE0E is VARIATION SELECTOR-15. - // It suppresses automatic emoji presentation of the preceding - // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS. - params.FootnoteReturnLinkContents = "↩\ufe0e" - } - - return &HTMLRenderer{ - HTMLRendererParameters: params, - - closeTag: closeTag, - headingIDs: make(map[string]int), - - sr: NewSmartypantsRenderer(params.Flags), - } -} - -func isHTMLTag(tag []byte, tagname string) bool { - found, _ := findHTMLTagPos(tag, tagname) - return found -} - -// Look for a character, but ignore it when it's in any kind of quotes, it -// might be JavaScript -func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { - inSingleQuote := false - inDoubleQuote := false - inGraveQuote := false - i := start - for i < len(html) { - switch { - case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: - return i - case html[i] == '\'': - inSingleQuote = !inSingleQuote - case html[i] == '"': - inDoubleQuote = !inDoubleQuote - case html[i] == '`': - inGraveQuote = !inGraveQuote - } - i++ - } - return start -} - -func findHTMLTagPos(tag []byte, tagname string) (bool, int) { - i := 0 - if i < len(tag) && tag[0] != '<' { - return false, -1 - } - i++ - i = skipSpace(tag, i) - - if i < len(tag) && tag[i] == '/' { - i++ - } - - i = skipSpace(tag, i) - j := 0 - for ; i < len(tag); i, j = i+1, j+1 { - if j >= len(tagname) { - break - } - - if strings.ToLower(string(tag[i]))[0] != tagname[j] { - return false, -1 - } - } - - if i == len(tag) { - return false, -1 - } - - rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') - if rightAngle >= i { - return true, rightAngle - } - - return false, -1 -} - -func skipSpace(tag []byte, i int) int { - for i < len(tag) && isspace(tag[i]) { - i++ - } - return i -} - -func isRelativeLink(link []byte) (yes bool) { - // a tag begin with '#' - if link[0] == '#' { - return true - } - - // link begin with '/' but not '//', the second maybe a protocol relative link - if len(link) >= 2 && link[0] == '/' && link[1] != '/' { - return true - } - - // only the root '/' - if len(link) == 1 && link[0] == '/' { - return true - } - - // current directory : begin with "./" - if bytes.HasPrefix(link, []byte("./")) { - return true - } - - // parent directory : begin with "../" - if 
bytes.HasPrefix(link, []byte("../")) { - return true - } - - return false -} - -func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string { - for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] { - tmp := fmt.Sprintf("%s-%d", id, count+1) - - if _, tmpFound := r.headingIDs[tmp]; !tmpFound { - r.headingIDs[id] = count + 1 - id = tmp - } else { - id = id + "-1" - } - } - - if _, found := r.headingIDs[id]; !found { - r.headingIDs[id] = 0 - } - - return id -} - -func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte { - if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' { - newDest := r.AbsolutePrefix - if link[0] != '/' { - newDest += "/" - } - newDest += string(link) - return []byte(newDest) - } - return link -} - -func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string { - if isRelativeLink(link) { - return attrs - } - val := []string{} - if flags&NofollowLinks != 0 { - val = append(val, "nofollow") - } - if flags&NoreferrerLinks != 0 { - val = append(val, "noreferrer") - } - if flags&NoopenerLinks != 0 { - val = append(val, "noopener") - } - if flags&HrefTargetBlank != 0 { - attrs = append(attrs, "target=\"_blank\"") - } - if len(val) == 0 { - return attrs - } - attr := fmt.Sprintf("rel=%q", strings.Join(val, " ")) - return append(attrs, attr) -} - -func isMailto(link []byte) bool { - return bytes.HasPrefix(link, []byte("mailto:")) -} - -func needSkipLink(flags HTMLFlags, dest []byte) bool { - if flags&SkipLinks != 0 { - return true - } - return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest) -} - -func isSmartypantable(node *Node) bool { - pt := node.Parent.Type - return pt != Link && pt != CodeBlock && pt != Code -} - -func appendLanguageAttr(attrs []string, info []byte) []string { - if len(info) == 0 { - return attrs - } - endOfLang := bytes.IndexAny(info, "\t ") - if endOfLang < 0 { - endOfLang = len(info) - } - return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang])) -} - -func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) { - w.Write(name) - if len(attrs) > 0 { - w.Write(spaceBytes) - w.Write([]byte(strings.Join(attrs, " "))) - } - w.Write(gtBytes) - r.lastOutputLen = 1 -} - -func footnoteRef(prefix string, node *Node) []byte { - urlFrag := prefix + string(slugify(node.Destination)) - anchor := fmt.Sprintf(`%d`, urlFrag, node.NoteID) - return []byte(fmt.Sprintf(`%s`, urlFrag, anchor)) -} - -func footnoteItem(prefix string, slug []byte) []byte { - return []byte(fmt.Sprintf(`
  • `, prefix, slug)) -} - -func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte { - const format = ` %s` - return []byte(fmt.Sprintf(format, prefix, slug, returnLink)) -} - -func itemOpenCR(node *Node) bool { - if node.Prev == nil { - return false - } - ld := node.Parent.ListData - return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0 -} - -func skipParagraphTags(node *Node) bool { - grandparent := node.Parent.Parent - if grandparent == nil || grandparent.Type != List { - return false - } - tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0 - return grandparent.Type == List && tightOrTerm -} - -func cellAlignment(align CellAlignFlags) string { - switch align { - case TableAlignmentLeft: - return "left" - case TableAlignmentRight: - return "right" - case TableAlignmentCenter: - return "center" - default: - return "" - } -} - -func (r *HTMLRenderer) out(w io.Writer, text []byte) { - if r.disableTags > 0 { - w.Write(htmlTagRe.ReplaceAll(text, []byte{})) - } else { - w.Write(text) - } - r.lastOutputLen = len(text) -} - -func (r *HTMLRenderer) cr(w io.Writer) { - if r.lastOutputLen > 0 { - r.out(w, nlBytes) - } -} - -var ( - nlBytes = []byte{'\n'} - gtBytes = []byte{'>'} - spaceBytes = []byte{' '} -) - -var ( - brTag = []byte("
    ") - brXHTMLTag = []byte("
    ") - emTag = []byte("") - emCloseTag = []byte("") - strongTag = []byte("") - strongCloseTag = []byte("") - delTag = []byte("") - delCloseTag = []byte("") - ttTag = []byte("") - ttCloseTag = []byte("") - aTag = []byte("") - preTag = []byte("
    ")
    -	preCloseTag        = []byte("
    ") - codeTag = []byte("") - codeCloseTag = []byte("") - pTag = []byte("

    ") - pCloseTag = []byte("

    ") - blockquoteTag = []byte("
    ") - blockquoteCloseTag = []byte("
    ") - hrTag = []byte("
    ") - hrXHTMLTag = []byte("
    ") - ulTag = []byte("
      ") - ulCloseTag = []byte("
    ") - olTag = []byte("
      ") - olCloseTag = []byte("
    ") - dlTag = []byte("
    ") - dlCloseTag = []byte("
    ") - liTag = []byte("
  • ") - liCloseTag = []byte("
  • ") - ddTag = []byte("
    ") - ddCloseTag = []byte("
    ") - dtTag = []byte("
    ") - dtCloseTag = []byte("
    ") - tableTag = []byte("") - tableCloseTag = []byte("
    ") - tdTag = []byte("") - thTag = []byte("") - theadTag = []byte("") - theadCloseTag = []byte("") - tbodyTag = []byte("") - tbodyCloseTag = []byte("") - trTag = []byte("") - trCloseTag = []byte("") - h1Tag = []byte("") - h2Tag = []byte("") - h3Tag = []byte("") - h4Tag = []byte("") - h5Tag = []byte("") - h6Tag = []byte("") - - footnotesDivBytes = []byte("\n
    \n\n") - footnotesCloseDivBytes = []byte("\n
    \n") -) - -func headingTagsFromLevel(level int) ([]byte, []byte) { - if level <= 1 { - return h1Tag, h1CloseTag - } - switch level { - case 2: - return h2Tag, h2CloseTag - case 3: - return h3Tag, h3CloseTag - case 4: - return h4Tag, h4CloseTag - case 5: - return h5Tag, h5CloseTag - } - return h6Tag, h6CloseTag -} - -func (r *HTMLRenderer) outHRTag(w io.Writer) { - if r.Flags&UseXHTML == 0 { - r.out(w, hrTag) - } else { - r.out(w, hrXHTMLTag) - } -} - -// RenderNode is a default renderer of a single node of a syntax tree. For -// block nodes it will be called twice: first time with entering=true, second -// time with entering=false, so that it could know when it's working on an open -// tag and when on close. It writes the result to w. -// -// The return value is a way to tell the calling walker to adjust its walk -// pattern: e.g. it can terminate the traversal by returning Terminate. Or it -// can ask the walker to skip a subtree of this node by returning SkipChildren. -// The typical behavior is to return GoToNext, which asks for the usual -// traversal to the next node. -func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { - attrs := []string{} - switch node.Type { - case Text: - if r.Flags&Smartypants != 0 { - var tmp bytes.Buffer - escapeHTML(&tmp, node.Literal) - r.sr.Process(w, tmp.Bytes()) - } else { - if node.Parent.Type == Link { - escLink(w, node.Literal) - } else { - escapeHTML(w, node.Literal) - } - } - case Softbreak: - r.cr(w) - // TODO: make it configurable via out(renderer.softbreak) - case Hardbreak: - if r.Flags&UseXHTML == 0 { - r.out(w, brTag) - } else { - r.out(w, brXHTMLTag) - } - r.cr(w) - case Emph: - if entering { - r.out(w, emTag) - } else { - r.out(w, emCloseTag) - } - case Strong: - if entering { - r.out(w, strongTag) - } else { - r.out(w, strongCloseTag) - } - case Del: - if entering { - r.out(w, delTag) - } else { - r.out(w, delCloseTag) - } - case HTMLSpan: - if r.Flags&SkipHTML != 0 { - break - } - r.out(w, node.Literal) - case Link: - // mark it but don't link it if it is not a safe link: no smartypants - dest := node.LinkData.Destination - if needSkipLink(r.Flags, dest) { - if entering { - r.out(w, ttTag) - } else { - r.out(w, ttCloseTag) - } - } else { - if entering { - dest = r.addAbsPrefix(dest) - var hrefBuf bytes.Buffer - hrefBuf.WriteString("href=\"") - escLink(&hrefBuf, dest) - hrefBuf.WriteByte('"') - attrs = append(attrs, hrefBuf.String()) - if node.NoteID != 0 { - r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) - break - } - attrs = appendLinkAttrs(attrs, r.Flags, dest) - if len(node.LinkData.Title) > 0 { - var titleBuff bytes.Buffer - titleBuff.WriteString("title=\"") - escapeHTML(&titleBuff, node.LinkData.Title) - titleBuff.WriteByte('"') - attrs = append(attrs, titleBuff.String()) - } - r.tag(w, aTag, attrs) - } else { - if node.NoteID != 0 { - break - } - r.out(w, aCloseTag) - } - } - case Image: - if r.Flags&SkipImages != 0 { - return SkipChildren - } - if entering { - dest := node.LinkData.Destination - dest = r.addAbsPrefix(dest) - if r.disableTags == 0 { - //if options.safe && potentiallyUnsafe(dest) { - //out(w, ``)
-				//} else {
-				r.out(w, []byte(`<img src=`)) - } - } - case Code: - r.out(w, codeTag) - escapeAllHTML(w, node.Literal) - r.out(w, codeCloseTag) - case Document: - break - case Paragraph: - if skipParagraphTags(node) { - break - } - if entering { - // TODO: untangle this clusterfuck about when the newlines need - // to be added and when not. - if node.Prev != nil { - switch node.Prev.Type { - case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: - r.cr(w) - } - } - if node.Parent.Type == BlockQuote && node.Prev == nil { - r.cr(w) - } - r.out(w, pTag) - } else { - r.out(w, pCloseTag) - if !(node.Parent.Type == Item && node.Next == nil) { - r.cr(w) - } - } - case BlockQuote: - if entering { - r.cr(w) - r.out(w, blockquoteTag) - } else { - r.out(w, blockquoteCloseTag) - r.cr(w) - } - case HTMLBlock: - if r.Flags&SkipHTML != 0 { - break - } - r.cr(w) - r.out(w, node.Literal) - r.cr(w) - case Heading: - headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level - openTag, closeTag := headingTagsFromLevel(headingLevel) - if entering { - if node.IsTitleblock { - attrs = append(attrs, `class="title"`) - } - if node.HeadingID != "" { - id := r.ensureUniqueHeadingID(node.HeadingID) - if r.HeadingIDPrefix != "" { - id = r.HeadingIDPrefix + id - } - if r.HeadingIDSuffix != "" { - id = id + r.HeadingIDSuffix - } - attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) - } - r.cr(w) - r.tag(w, openTag, attrs) - } else { - r.out(w, closeTag) - if !(node.Parent.Type == Item && node.Next == nil) { - r.cr(w) - } - } - case HorizontalRule: - r.cr(w) - r.outHRTag(w) - r.cr(w) - case List: - openTag := ulTag - closeTag := ulCloseTag - if node.ListFlags&ListTypeOrdered != 0 { - openTag = olTag - closeTag = olCloseTag - } - if node.ListFlags&ListTypeDefinition != 0 { - openTag = dlTag - closeTag = dlCloseTag - } - if entering { - if node.IsFootnotesList { - r.out(w, footnotesDivBytes) - r.outHRTag(w) - r.cr(w) - } - r.cr(w) - if node.Parent.Type == Item && node.Parent.Parent.Tight { - r.cr(w) - } - r.tag(w, openTag[:len(openTag)-1], attrs) - r.cr(w) - } else { - r.out(w, closeTag) - //cr(w) - //if node.parent.Type != Item { - // cr(w) - //} - if node.Parent.Type == Item && node.Next != nil { - r.cr(w) - } - if node.Parent.Type == Document || node.Parent.Type == BlockQuote { - r.cr(w) - } - if node.IsFootnotesList { - r.out(w, footnotesCloseDivBytes) - } - } - case Item: - openTag := liTag - closeTag := liCloseTag - if node.ListFlags&ListTypeDefinition != 0 { - openTag = ddTag - closeTag = ddCloseTag - } - if node.ListFlags&ListTypeTerm != 0 { - openTag = dtTag - closeTag = dtCloseTag - } - if entering { - if itemOpenCR(node) { - r.cr(w) - } - if node.ListData.RefLink != nil { - slug := slugify(node.ListData.RefLink) - r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) - break - } - r.out(w, openTag) - } else { - if node.ListData.RefLink != nil { - slug := slugify(node.ListData.RefLink) - if r.Flags&FootnoteReturnLinks != 0 { - r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) - } - } - r.out(w, closeTag) - r.cr(w) - } - case CodeBlock: - attrs = appendLanguageAttr(attrs, node.Info) - r.cr(w) - r.out(w, preTag) - r.tag(w, codeTag[:len(codeTag)-1], attrs) - escapeAllHTML(w, node.Literal) - r.out(w, codeCloseTag) - r.out(w, preCloseTag) - if node.Parent.Type != Item { - r.cr(w) - } - case Table: - if entering { - r.cr(w) - r.out(w, tableTag) - } else { - r.out(w, tableCloseTag) - r.cr(w) - } - case TableCell: - openTag := tdTag - closeTag := 
tdCloseTag - if node.IsHeader { - openTag = thTag - closeTag = thCloseTag - } - if entering { - align := cellAlignment(node.Align) - if align != "" { - attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) - } - if node.Prev == nil { - r.cr(w) - } - r.tag(w, openTag, attrs) - } else { - r.out(w, closeTag) - r.cr(w) - } - case TableHead: - if entering { - r.cr(w) - r.out(w, theadTag) - } else { - r.out(w, theadCloseTag) - r.cr(w) - } - case TableBody: - if entering { - r.cr(w) - r.out(w, tbodyTag) - // XXX: this is to adhere to a rather silly test. Should fix test. - if node.FirstChild == nil { - r.cr(w) - } - } else { - r.out(w, tbodyCloseTag) - r.cr(w) - } - case TableRow: - if entering { - r.cr(w) - r.out(w, trTag) - } else { - r.out(w, trCloseTag) - r.cr(w) - } - default: - panic("Unknown node type " + node.Type.String()) - } - return GoToNext -} - -// RenderHeader writes HTML document preamble and TOC if requested. -func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { - r.writeDocumentHeader(w) - if r.Flags&TOC != 0 { - r.writeTOC(w, ast) - } -} - -// RenderFooter writes HTML document footer. -func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { - if r.Flags&CompletePage == 0 { - return - } - io.WriteString(w, "\n\n\n") -} - -func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { - if r.Flags&CompletePage == 0 { - return - } - ending := "" - if r.Flags&UseXHTML != 0 { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - ending = " /" - } else { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - } - io.WriteString(w, "\n") - io.WriteString(w, " ") - if r.Flags&Smartypants != 0 { - r.sr.Process(w, []byte(r.Title)) - } else { - escapeHTML(w, []byte(r.Title)) - } - io.WriteString(w, "\n") - io.WriteString(w, " \n") - io.WriteString(w, " \n") - if r.CSS != "" { - io.WriteString(w, " \n") - } - if r.Icon != "" { - io.WriteString(w, " \n") - } - io.WriteString(w, "\n") - io.WriteString(w, "\n\n") -} - -func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { - buf := bytes.Buffer{} - - inHeading := false - tocLevel := 0 - headingCount := 0 - - ast.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Heading && !node.HeadingData.IsTitleblock { - inHeading = entering - if entering { - node.HeadingID = fmt.Sprintf("toc_%d", headingCount) - if node.Level == tocLevel { - buf.WriteString("\n\n
  • ") - } else if node.Level < tocLevel { - for node.Level < tocLevel { - tocLevel-- - buf.WriteString("
  • \n") - } - buf.WriteString("\n\n
  • ") - } else { - for node.Level > tocLevel { - tocLevel++ - buf.WriteString("\n") - } - - if buf.Len() > 0 { - io.WriteString(w, "\n") - } - r.lastOutputLen = buf.Len() -} diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go deleted file mode 100644 index d45bd94172..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/inline.go +++ /dev/null @@ -1,1228 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// Functions to parse inline elements. -// - -package blackfriday - -import ( - "bytes" - "regexp" - "strconv" -) - -var ( - urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` - anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) - - // https://www.w3.org/TR/html5/syntax.html#character-references - // highest unicode code point in 17 planes (2^20): 1,114,112d = - // 7 dec digits or 6 hex digits - // named entity references can be 2-31 characters with stuff like < - // at one end and ∳ at the other. There - // are also sometimes numbers at the end, although this isn't inherent - // in the specification; there are never numbers anywhere else in - // current character references, though; see ¾ and ▒, etc. - // https://www.w3.org/TR/html5/syntax.html#named-character-references - // - // entity := "&" (named group | number ref) ";" - // named group := [a-zA-Z]{2,31}[0-9]{0,2} - // number ref := "#" (dec ref | hex ref) - // dec ref := [0-9]{1,7} - // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} - htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) -) - -// Functions to parse text within a block -// Each function returns the number of chars taken care of -// data is the complete block being rendered -// offset is the number of valid chars before the current cursor - -func (p *Markdown) inline(currBlock *Node, data []byte) { - // handlers might call us recursively: enforce a maximum depth - if p.nesting >= p.maxNesting || len(data) == 0 { - return - } - p.nesting++ - beg, end := 0, 0 - for end < len(data) { - handler := p.inlineCallback[data[end]] - if handler != nil { - if consumed, node := handler(p, data, end); consumed == 0 { - // No action from the callback. - end++ - } else { - // Copy inactive chars into the output. - currBlock.AppendChild(text(data[beg:end])) - if node != nil { - currBlock.AppendChild(node) - } - // Skip past whatever the callback used. 
- beg = end + consumed - end = beg - } - } else { - end++ - } - } - if beg < len(data) { - if data[end-1] == '\n' { - end-- - } - currBlock.AppendChild(text(data[beg:end])) - } - p.nesting-- -} - -// single and double emphasis parsing -func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - c := data[0] - - if len(data) > 2 && data[1] != c { - // whitespace cannot follow an opening emphasis; - // strikethrough only takes two characters '~~' - if c == '~' || isspace(data[1]) { - return 0, nil - } - ret, node := helperEmphasis(p, data[1:], c) - if ret == 0 { - return 0, nil - } - - return ret + 1, node - } - - if len(data) > 3 && data[1] == c && data[2] != c { - if isspace(data[2]) { - return 0, nil - } - ret, node := helperDoubleEmphasis(p, data[2:], c) - if ret == 0 { - return 0, nil - } - - return ret + 2, node - } - - if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { - if c == '~' || isspace(data[3]) { - return 0, nil - } - ret, node := helperTripleEmphasis(p, data, 3, c) - if ret == 0 { - return 0, nil - } - - return ret + 3, node - } - - return 0, nil -} - -func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - nb := 0 - - // count the number of backticks in the delimiter - for nb < len(data) && data[nb] == '`' { - nb++ - } - - // find the next delimiter - i, end := 0, 0 - for end = nb; end < len(data) && i < nb; end++ { - if data[end] == '`' { - i++ - } else { - i = 0 - } - } - - // no matching delimiter? - if i < nb && end >= len(data) { - return 0, nil - } - - // trim outside whitespace - fBegin := nb - for fBegin < end && data[fBegin] == ' ' { - fBegin++ - } - - fEnd := end - nb - for fEnd > fBegin && data[fEnd-1] == ' ' { - fEnd-- - } - - // render the code span - if fBegin != fEnd { - code := NewNode(Code) - code.Literal = data[fBegin:fEnd] - return end, code - } - - return end, nil -} - -// newline preceded by two spaces becomes
    -func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - origOffset := offset - for offset < len(data) && data[offset] == ' ' { - offset++ - } - - if offset < len(data) && data[offset] == '\n' { - if offset-origOffset >= 2 { - return offset - origOffset + 1, NewNode(Hardbreak) - } - return offset - origOffset, nil - } - return 0, nil -} - -// newline without two spaces works when HardLineBreak is enabled -func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - if p.extensions&HardLineBreak != 0 { - return 1, NewNode(Hardbreak) - } - return 0, nil -} - -type linkType int - -const ( - linkNormal linkType = iota - linkImg - linkDeferredFootnote - linkInlineFootnote -) - -func isReferenceStyleLink(data []byte, pos int, t linkType) bool { - if t == linkDeferredFootnote { - return false - } - return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' -} - -func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -// '[': parse a link or an image or a footnote -func link(p *Markdown, data []byte, offset int) (int, *Node) { - // no links allowed inside regular links, footnote, and deferred footnotes - if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { - return 0, nil - } - - var t linkType - switch { - // special case: ![^text] == deferred footnote (that follows something with - // an exclamation point) - case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': - t = linkDeferredFootnote - // ![alt] == image - case offset >= 0 && data[offset] == '!': - t = linkImg - offset++ - // ^[text] == inline footnote - // [^refId] == deferred footnote - case p.extensions&Footnotes != 0: - if offset >= 0 && data[offset] == '^' { - t = linkInlineFootnote - offset++ - } else if len(data)-1 > offset && data[offset+1] == '^' { - t = linkDeferredFootnote - } - // [text] == regular link - default: - t = linkNormal - } - - data = data[offset:] - - var ( - i = 1 - noteID int - title, link, altContent []byte - textHasNl = false - ) - - if t == linkDeferredFootnote { - i++ - } - - // look for the matching closing bracket - for level := 1; level > 0 && i < len(data); i++ { - switch { - case data[i] == '\n': - textHasNl = true - - case isBackslashEscaped(data, i): - continue - - case data[i] == '[': - level++ - - case data[i] == ']': - level-- - if level <= 0 { - i-- // compensate for extra i++ in for loop - } - } - } - - if i >= len(data) { - return 0, nil - } - - txtE := i - i++ - var footnoteNode *Node - - // skip any amount of whitespace or newline - // (this is much more lax than original markdown syntax) - for i < len(data) && isspace(data[i]) { - i++ - } - - // inline style link - switch { - case i < len(data) && data[i] == '(': - // skip initial whitespace - i++ - - for i < len(data) && isspace(data[i]) { - i++ - } - - linkB := i - - // look for link end: ' " ) - findlinkend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')' || data[i] == '\'' || data[i] == '"': - break findlinkend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - linkE := i - - // look for title end if present - titleB, titleE := 0, 0 - if data[i] == '\'' 
|| data[i] == '"' { - i++ - titleB = i - - findtitleend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')': - break findtitleend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - - // skip whitespace after title - titleE = i - 1 - for titleE > titleB && isspace(data[titleE]) { - titleE-- - } - - // check for closing quote presence - if data[titleE] != '\'' && data[titleE] != '"' { - titleB, titleE = 0, 0 - linkE = i - } - } - - // remove whitespace at the end of the link - for linkE > linkB && isspace(data[linkE-1]) { - linkE-- - } - - // remove optional angle brackets around the link - if data[linkB] == '<' { - linkB++ - } - if data[linkE-1] == '>' { - linkE-- - } - - // build escaped link and title - if linkE > linkB { - link = data[linkB:linkE] - } - - if titleE > titleB { - title = data[titleB:titleE] - } - - i++ - - // reference style link - case isReferenceStyleLink(data, i, t): - var id []byte - altContentConsidered := false - - // look for the id - i++ - linkB := i - for i < len(data) && data[i] != ']' { - i++ - } - if i >= len(data) { - return 0, nil - } - linkE := i - - // find the reference - if linkB == linkE { - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - id = data[1:txtE] - altContentConsidered = true - } - } else { - id = data[linkB:linkE] - } - - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - // keep link and title from reference - link = lr.link - title = lr.title - if altContentConsidered { - altContent = lr.text - } - i++ - - // shortcut reference style link or reference or inline footnote - default: - var id []byte - - // craft the id - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - if t == linkDeferredFootnote { - id = data[2:txtE] // get rid of the ^ - } else { - id = data[1:txtE] - } - } - - footnoteNode = NewNode(Item) - if t == linkInlineFootnote { - // create a new reference - noteID = len(p.notes) + 1 - - var fragment []byte - if len(id) > 0 { - if len(id) < 16 { - fragment = make([]byte, len(id)) - } else { - fragment = make([]byte, 16) - } - copy(fragment, slugify(id)) - } else { - fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
- } - - ref := &reference{ - noteID: noteID, - hasBlock: false, - link: fragment, - title: id, - footnote: footnoteNode, - } - - p.notes = append(p.notes, ref) - - link = ref.link - title = ref.title - } else { - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - if t == linkDeferredFootnote { - lr.noteID = len(p.notes) + 1 - lr.footnote = footnoteNode - p.notes = append(p.notes, lr) - } - - // keep link and title from reference - link = lr.link - // if inline footnote, title == footnote contents - title = lr.title - noteID = lr.noteID - } - - // rewind the whitespace - i = txtE + 1 - } - - var uLink []byte - if t == linkNormal || t == linkImg { - if len(link) > 0 { - var uLinkBuf bytes.Buffer - unescapeText(&uLinkBuf, link) - uLink = uLinkBuf.Bytes() - } - - // links need something to click on and somewhere to go - if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { - return 0, nil - } - } - - // call the relevant rendering function - var linkNode *Node - switch t { - case linkNormal: - linkNode = NewNode(Link) - linkNode.Destination = normalizeURI(uLink) - linkNode.Title = title - if len(altContent) > 0 { - linkNode.AppendChild(text(altContent)) - } else { - // links cannot contain other links, so turn off link parsing - // temporarily and recurse - insideLink := p.insideLink - p.insideLink = true - p.inline(linkNode, data[1:txtE]) - p.insideLink = insideLink - } - - case linkImg: - linkNode = NewNode(Image) - linkNode.Destination = uLink - linkNode.Title = title - linkNode.AppendChild(text(data[1:txtE])) - i++ - - case linkInlineFootnote, linkDeferredFootnote: - linkNode = NewNode(Link) - linkNode.Destination = link - linkNode.Title = title - linkNode.NoteID = noteID - linkNode.Footnote = footnoteNode - if t == linkInlineFootnote { - i++ - } - - default: - return 0, nil - } - - return i, linkNode -} - -func (p *Markdown) inlineHTMLComment(data []byte) int { - if len(data) < 5 { - return 0 - } - if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { - return 0 - } - i := 5 - // scan for an end-of-comment marker, across lines if necessary - for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { - i++ - } - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return i + 1 -} - -func stripMailto(link []byte) []byte { - if bytes.HasPrefix(link, []byte("mailto://")) { - return link[9:] - } else if bytes.HasPrefix(link, []byte("mailto:")) { - return link[7:] - } else { - return link - } -} - -// autolinkType specifies a kind of autolink that gets detected. -type autolinkType int - -// These are the possible flag values for the autolink renderer. -const ( - notAutolink autolinkType = iota - normalAutolink - emailAutolink -) - -// '<' when tags or autolinks are allowed -func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - altype, end := tagLength(data) - if size := p.inlineHTMLComment(data); size > 0 { - end = size - } - if end > 2 { - if altype != notAutolink { - var uLink bytes.Buffer - unescapeText(&uLink, data[1:end+1-2]) - if uLink.Len() > 0 { - link := uLink.Bytes() - node := NewNode(Link) - node.Destination = link - if altype == emailAutolink { - node.Destination = append([]byte("mailto:"), link...) 
- } - node.AppendChild(text(stripMailto(link))) - return end, node - } - } else { - htmlTag := NewNode(HTMLSpan) - htmlTag.Literal = data[:end] - return end, htmlTag - } - } - - return end, nil -} - -// '\\' backslash escape -var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") - -func escape(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - if len(data) > 1 { - if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { - return 2, NewNode(Hardbreak) - } - if bytes.IndexByte(escapeChars, data[1]) < 0 { - return 0, nil - } - - return 2, text(data[1:2]) - } - - return 2, nil -} - -func unescapeText(ob *bytes.Buffer, src []byte) { - i := 0 - for i < len(src) { - org := i - for i < len(src) && src[i] != '\\' { - i++ - } - - if i > org { - ob.Write(src[org:i]) - } - - if i+1 >= len(src) { - break - } - - ob.WriteByte(src[i+1]) - i += 2 - } -} - -// '&' escaped when it doesn't belong to an entity -// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; -func entity(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - end := 1 - - if end < len(data) && data[end] == '#' { - end++ - } - - for end < len(data) && isalnum(data[end]) { - end++ - } - - if end < len(data) && data[end] == ';' { - end++ // real entity - } else { - return 0, nil // lone '&' - } - - ent := data[:end] - // undo & escaping or it will be converted to &amp; by another - // escaper in the renderer - if bytes.Equal(ent, []byte("&")) { - ent = []byte{'&'} - } - - return end, text(ent) -} - -func linkEndsWithEntity(data []byte, linkEnd int) bool { - entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) - return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd -} - -// hasPrefixCaseInsensitive is a custom implementation of -// strings.HasPrefix(strings.ToLower(s), prefix) -// we rolled our own because ToLower pulls in a huge machinery of lowercasing -// anything from Unicode and that's very slow. Since this func will only be -// used on ASCII protocol prefixes, we can take shortcuts. 
-func hasPrefixCaseInsensitive(s, prefix []byte) bool { - if len(s) < len(prefix) { - return false - } - delta := byte('a' - 'A') - for i, b := range prefix { - if b != s[i] && b != s[i]+delta { - return false - } - } - return true -} - -var protocolPrefixes = [][]byte{ - []byte("http://"), - []byte("https://"), - []byte("ftp://"), - []byte("file://"), - []byte("mailto:"), -} - -const shortestPrefix = 6 // len("ftp://"), the shortest of the above - -func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // quick check to rule out most false hits - if p.insideLink || len(data) < offset+shortestPrefix { - return 0, nil - } - for _, prefix := range protocolPrefixes { - endOfHead := offset + 8 // 8 is the len() of the longest prefix - if endOfHead > len(data) { - endOfHead = len(data) - } - if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { - return autoLink(p, data, offset) - } - } - return 0, nil -} - -func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // Now a more expensive check to see if we're not inside an anchor element - anchorStart := offset - offsetFromAnchor := 0 - for anchorStart > 0 && data[anchorStart] != '<' { - anchorStart-- - offsetFromAnchor++ - } - - anchorStr := anchorRe.Find(data[anchorStart:]) - if anchorStr != nil { - anchorClose := NewNode(HTMLSpan) - anchorClose.Literal = anchorStr[offsetFromAnchor:] - return len(anchorStr) - offsetFromAnchor, anchorClose - } - - // scan backward for a word boundary - rewind := 0 - for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { - rewind++ - } - if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters - return 0, nil - } - - origData := data - data = data[offset-rewind:] - - if !isSafeLink(data) { - return 0, nil - } - - linkEnd := 0 - for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { - linkEnd++ - } - - // Skip punctuation at the end of the link - if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { - linkEnd-- - } - - // But don't skip semicolon if it's a part of escaped entity: - if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { - linkEnd-- - } - - // See if the link finishes with a punctuation sign that can be closed. - var copen byte - switch data[linkEnd-1] { - case '"': - copen = '"' - case '\'': - copen = '\'' - case ')': - copen = '(' - case ']': - copen = '[' - case '}': - copen = '{' - default: - copen = 0 - } - - if copen != 0 { - bufEnd := offset - rewind + linkEnd - 2 - - openDelim := 1 - - /* Try to close the final punctuation sign in this same line; - * if we managed to close it outside of the URL, that means that it's - * not part of the URL. If it closes inside the URL, that means it - * is part of the URL. 
- * - * Examples: - * - * foo http://www.pokemon.com/Pikachu_(Electric) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo (http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric)) - * - * (foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => foo http://www.pokemon.com/Pikachu_(Electric) - */ - - for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { - if origData[bufEnd] == data[linkEnd-1] { - openDelim++ - } - - if origData[bufEnd] == copen { - openDelim-- - } - - bufEnd-- - } - - if openDelim == 0 { - linkEnd-- - } - } - - var uLink bytes.Buffer - unescapeText(&uLink, data[:linkEnd]) - - if uLink.Len() > 0 { - node := NewNode(Link) - node.Destination = uLink.Bytes() - node.AppendChild(text(uLink.Bytes())) - return linkEnd, node - } - - return linkEnd, nil -} - -func isEndOfLink(char byte) bool { - return isspace(char) || char == '<' -} - -var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} -var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} - -func isSafeLink(link []byte) bool { - for _, path := range validPaths { - if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { - if len(link) == len(path) { - return true - } else if isalnum(link[len(path)]) { - return true - } - } - } - - for _, prefix := range validUris { - // TODO: handle unicode here - // case-insensitive prefix test - if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { - return true - } - } - - return false -} - -// return the length of the given tag, or 0 is it's not valid -func tagLength(data []byte) (autolink autolinkType, end int) { - var i, j int - - // a valid tag can't be shorter than 3 chars - if len(data) < 3 { - return notAutolink, 0 - } - - // begins with a '<' optionally followed by '/', followed by letter or number - if data[0] != '<' { - return notAutolink, 0 - } - if data[1] == '/' { - i = 2 - } else { - i = 1 - } - - if !isalnum(data[i]) { - return notAutolink, 0 - } - - // scheme test - autolink = notAutolink - - // try to find the beginning of an URI - for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { - i++ - } - - if i > 1 && i < len(data) && data[i] == '@' { - if j = isMailtoAutoLink(data[i:]); j != 0 { - return emailAutolink, i + j - } - } - - if i > 2 && i < len(data) && data[i] == ':' { - autolink = normalAutolink - i++ - } - - // complete autolink test: no whitespace or ' or " - switch { - case i >= len(data): - autolink = notAutolink - case autolink != notAutolink: - j = i - - for i < len(data) { - if data[i] == '\\' { - i += 2 - } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { - break - } else { - i++ - } - - } - - if i >= len(data) { - return autolink, 0 - } - if i > j && data[i] == '>' { - return autolink, i + 1 - } - - // one of the forbidden chars has been found - autolink = notAutolink - } - i += bytes.IndexByte(data[i:], '>') - if i < 0 { - return autolink, 0 - } - return autolink, i + 1 -} - -// look for the address part of a mail autolink and '>' -// this is less strict than the original markdown e-mail address matching -func isMailtoAutoLink(data []byte) int { - nb := 0 - - // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' - for i := 0; i < len(data); i++ { - if isalnum(data[i]) { - continue - } - - switch data[i] { - case '@': - nb++ - - case '-', '.', '_': - break - - case '>': - if nb == 1 { - return i + 1 - } - return 0 - default: - return 0 - } - } - - return 0 -} - -// look for the next emph char, skipping other constructs -func helperFindEmphChar(data []byte, c byte) int { - i := 0 - - for i < len(data) { - for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { - i++ - } - if i >= len(data) { - return 0 - } - // do not count escaped chars - if i != 0 && data[i-1] == '\\' { - i++ - continue - } - if data[i] == c { - return i - } - - if data[i] == '`' { - // skip a code span - tmpI := 0 - i++ - for i < len(data) && data[i] != '`' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } else if data[i] == '[' { - // skip a link - tmpI := 0 - i++ - for i < len(data) && data[i] != ']' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\n') { - i++ - } - if i >= len(data) { - return tmpI - } - if data[i] != '[' && data[i] != '(' { // not a link - if tmpI > 0 { - return tmpI - } - continue - } - cc := data[i] - i++ - for i < len(data) && data[i] != cc { - if tmpI == 0 && data[i] == c { - return i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } - } - return 0 -} - -func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - // skip one symbol if coming from emph3 - if len(data) > 1 && data[0] == c && data[1] == c { - i = 1 - } - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - if i >= len(data) { - return 0, nil - } - - if i+1 < len(data) && data[i+1] == c { - i++ - continue - } - - if data[i] == c && !isspace(data[i-1]) { - - if p.extensions&NoIntraEmphasis != 0 { - if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { - continue - } - } - - emph := NewNode(Emph) - p.inline(emph, data[:i]) - return i + 1, emph - } - } - - return 0, nil -} - -func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { - nodeType := Strong - if c == '~' { - nodeType = Del - } - node := NewNode(nodeType) - p.inline(node, data[:i]) - return i + 2, node - } - i++ - } - return 0, nil -} - -func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { - i := 0 - origData := data - data = data[offset:] - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - // skip whitespace preceded symbols - if data[i] != c || isspace(data[i-1]) { - continue - } - - switch { - case i+2 < len(data) && data[i+1] == c && data[i+2] == c: - // triple symbol found - strong := NewNode(Strong) - em := NewNode(Emph) - strong.AppendChild(em) - p.inline(em, data[:i]) - return i + 3, strong - case (i+1 < len(data) && data[i+1] == c): - // double symbol found, hand over to emph1 - length, node := helperEmphasis(p, origData[offset-2:], c) - if length == 0 { - return 0, nil - } - return length - 2, node - default: - // single symbol found, hand over to emph2 - length, node := helperDoubleEmphasis(p, origData[offset-1:], c) - if length == 0 { - return 0, nil - } - return length - 1, node - } - } - return 0, nil -} - -func text(s []byte) *Node { - node := NewNode(Text) - node.Literal = s - return node -} - -func normalizeURI(s []byte) []byte { - return s // TODO: implement -} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go deleted file mode 100644 index 58d2e4538c..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/markdown.go +++ /dev/null @@ -1,950 +0,0 @@ -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. - -package blackfriday - -import ( - "bytes" - "fmt" - "io" - "strings" - "unicode/utf8" -) - -// -// Markdown parsing and processing -// - -// Version string of the package. Appears in the rendered document when -// CompletePage flag is on. -const Version = "2.0" - -// Extensions is a bitwise or'ed collection of enabled Blackfriday's -// extensions. -type Extensions int - -// These are the supported markdown parsing extensions. -// OR these values together to select multiple extensions. 
-const ( - NoExtensions Extensions = 0 - NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words - Tables // Render tables - FencedCode // Render fenced code blocks - Autolink // Detect embedded URLs that are not explicitly marked - Strikethrough // Strikethrough text using ~~test~~ - LaxHTMLBlocks // Loosen up HTML block parsing rules - SpaceHeadings // Be strict about prefix heading rules - HardLineBreak // Translate newlines into line breaks - TabSizeEight // Expand tabs to eight spaces instead of four - Footnotes // Pandoc-style footnotes - NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block - HeadingIDs // specify heading IDs with {#id} - Titleblock // Titleblock ala pandoc - AutoHeadingIDs // Create the heading ID from the text - BackslashLineBreak // Translate trailing backslashes into line breaks - DefinitionLists // Render definition lists - - CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | - SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes - - CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | - Autolink | Strikethrough | SpaceHeadings | HeadingIDs | - BackslashLineBreak | DefinitionLists -) - -// ListType contains bitwise or'ed flags for list and list item objects. -type ListType int - -// These are the possible flag values for the ListItem renderer. -// Multiple flag values may be ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - ListTypeOrdered ListType = 1 << iota - ListTypeDefinition - ListTypeTerm - - ListItemContainsBlock - ListItemBeginningOfList // TODO: figure out if this is of any use now - ListItemEndOfList -) - -// CellAlignFlags holds a type of alignment in a table cell. -type CellAlignFlags int - -// These are the possible flag values for the table cell renderer. -// Only a single one of these values will be used; they are not ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - TableAlignmentLeft CellAlignFlags = 1 << iota - TableAlignmentRight - TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) -) - -// The size of a tab stop. -const ( - TabSizeDefault = 4 - TabSizeDouble = 8 -) - -// blockTags is a set of tags that are recognized as HTML block tags. -// Any of these can be included in markdown text without special escaping. -var blockTags = map[string]struct{}{ - "blockquote": {}, - "del": {}, - "div": {}, - "dl": {}, - "fieldset": {}, - "form": {}, - "h1": {}, - "h2": {}, - "h3": {}, - "h4": {}, - "h5": {}, - "h6": {}, - "iframe": {}, - "ins": {}, - "math": {}, - "noscript": {}, - "ol": {}, - "pre": {}, - "p": {}, - "script": {}, - "style": {}, - "table": {}, - "ul": {}, - - // HTML5 - "address": {}, - "article": {}, - "aside": {}, - "canvas": {}, - "figcaption": {}, - "figure": {}, - "footer": {}, - "header": {}, - "hgroup": {}, - "main": {}, - "nav": {}, - "output": {}, - "progress": {}, - "section": {}, - "video": {}, -} - -// Renderer is the rendering interface. This is mostly of interest if you are -// implementing a new rendering format. -// -// Only an HTML implementation is provided in this repository, see the README -// for external implementations. -type Renderer interface { - // RenderNode is the main rendering method. It will be called once for - // every leaf node and twice for every non-leaf node (first with - // entering=true, then with entering=false). 
The method should write its - // rendition of the node to the supplied writer w. - RenderNode(w io.Writer, node *Node, entering bool) WalkStatus - - // RenderHeader is a method that allows the renderer to produce some - // content preceding the main body of the output document. The header is - // understood in the broad sense here. For example, the default HTML - // renderer will write not only the HTML document preamble, but also the - // table of contents if it was requested. - // - // The method will be passed an entire document tree, in case a particular - // implementation needs to inspect it to produce output. - // - // The output should be written to the supplied writer w. If your - // implementation has no header to write, supply an empty implementation. - RenderHeader(w io.Writer, ast *Node) - - // RenderFooter is a symmetric counterpart of RenderHeader. - RenderFooter(w io.Writer, ast *Node) -} - -// Callback functions for inline parsing. One such function is defined -// for each character that triggers a response when parsing inline data. -type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) - -// Markdown is a type that holds extensions and the runtime state used by -// Parse, and the renderer. You can not use it directly, construct it with New. -type Markdown struct { - renderer Renderer - referenceOverride ReferenceOverrideFunc - refs map[string]*reference - inlineCallback [256]inlineParser - extensions Extensions - nesting int - maxNesting int - insideLink bool - - // Footnotes need to be ordered as well as available to quickly check for - // presence. If a ref is also a footnote, it's stored both in refs and here - // in notes. Slice is nil if footnotes not enabled. - notes []*reference - - doc *Node - tip *Node // = doc - oldTip *Node - lastMatchedContainer *Node // = doc - allClosed bool -} - -func (p *Markdown) getRef(refid string) (ref *reference, found bool) { - if p.referenceOverride != nil { - r, overridden := p.referenceOverride(refid) - if overridden { - if r == nil { - return nil, false - } - return &reference{ - link: []byte(r.Link), - title: []byte(r.Title), - noteID: 0, - hasBlock: false, - text: []byte(r.Text)}, true - } - } - // refs are case insensitive - ref, found = p.refs[strings.ToLower(refid)] - return ref, found -} - -func (p *Markdown) finalize(block *Node) { - above := block.Parent - block.open = false - p.tip = above -} - -func (p *Markdown) addChild(node NodeType, offset uint32) *Node { - return p.addExistingChild(NewNode(node), offset) -} - -func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { - for !p.tip.canContain(node.Type) { - p.finalize(p.tip) - } - p.tip.AppendChild(node) - p.tip = node - return node -} - -func (p *Markdown) closeUnmatchedBlocks() { - if !p.allClosed { - for p.oldTip != p.lastMatchedContainer { - parent := p.oldTip.Parent - p.finalize(p.oldTip) - p.oldTip = parent - } - p.allClosed = true - } -} - -// -// -// Public interface -// -// - -// Reference represents the details of a link. -// See the documentation in Options for more details on use-case. -type Reference struct { - // Link is usually the URL the reference points to. - Link string - // Title is the alternate text describing the link in more detail. 
- Title string - // Text is the optional text to override the ref with if the syntax used was - // [refid][] - Text string -} - -// ReferenceOverrideFunc is expected to be called with a reference string and -// return either a valid Reference type that the reference string maps to or -// nil. If overridden is false, the default reference logic will be executed. -// See the documentation in Options for more details on use-case. -type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) - -// New constructs a Markdown processor. You can use the same With* functions as -// for Run() to customize parser's behavior and the renderer. -func New(opts ...Option) *Markdown { - var p Markdown - for _, opt := range opts { - opt(&p) - } - p.refs = make(map[string]*reference) - p.maxNesting = 16 - p.insideLink = false - docNode := NewNode(Document) - p.doc = docNode - p.tip = docNode - p.oldTip = docNode - p.lastMatchedContainer = docNode - p.allClosed = true - // register inline parsers - p.inlineCallback[' '] = maybeLineBreak - p.inlineCallback['*'] = emphasis - p.inlineCallback['_'] = emphasis - if p.extensions&Strikethrough != 0 { - p.inlineCallback['~'] = emphasis - } - p.inlineCallback['`'] = codeSpan - p.inlineCallback['\n'] = lineBreak - p.inlineCallback['['] = link - p.inlineCallback['<'] = leftAngle - p.inlineCallback['\\'] = escape - p.inlineCallback['&'] = entity - p.inlineCallback['!'] = maybeImage - p.inlineCallback['^'] = maybeInlineFootnote - if p.extensions&Autolink != 0 { - p.inlineCallback['h'] = maybeAutoLink - p.inlineCallback['m'] = maybeAutoLink - p.inlineCallback['f'] = maybeAutoLink - p.inlineCallback['H'] = maybeAutoLink - p.inlineCallback['M'] = maybeAutoLink - p.inlineCallback['F'] = maybeAutoLink - } - if p.extensions&Footnotes != 0 { - p.notes = make([]*reference, 0) - } - return &p -} - -// Option customizes the Markdown processor's default behavior. -type Option func(*Markdown) - -// WithRenderer allows you to override the default renderer. -func WithRenderer(r Renderer) Option { - return func(p *Markdown) { - p.renderer = r - } -} - -// WithExtensions allows you to pick some of the many extensions provided by -// Blackfriday. You can bitwise OR them. -func WithExtensions(e Extensions) Option { - return func(p *Markdown) { - p.extensions = e - } -} - -// WithNoExtensions turns off all extensions and custom behavior. -func WithNoExtensions() Option { - return func(p *Markdown) { - p.extensions = NoExtensions - p.renderer = NewHTMLRenderer(HTMLRendererParameters{ - Flags: HTMLFlagsNone, - }) - } -} - -// WithRefOverride sets an optional function callback that is called every -// time a reference is resolved. -// -// In Markdown, the link reference syntax can be made to resolve a link to -// a reference instead of an inline URL, in one of the following ways: -// -// * [link text][refid] -// * [refid][] -// -// Usually, the refid is defined at the bottom of the Markdown document. If -// this override function is provided, the refid is passed to the override -// function first, before consulting the defined refids at the bottom. If -// the override function indicates an override did not occur, the refids at -// the bottom will be used to fill in the link details. -func WithRefOverride(o ReferenceOverrideFunc) Option { - return func(p *Markdown) { - p.referenceOverride = o - } -} - -// Run is the main entry point to Blackfriday. It parses and renders a -// block of markdown-encoded text. 
-// -// The simplest invocation of Run takes one argument, input: -// output := Run(input) -// This will parse the input with CommonExtensions enabled and render it with -// the default HTMLRenderer (with CommonHTMLFlags). -// -// Variadic arguments opts can customize the default behavior. Since Markdown -// type does not contain exported fields, you can not use it directly. Instead, -// use the With* functions. For example, this will call the most basic -// functionality, with no extensions: -// output := Run(input, WithNoExtensions()) -// -// You can use any number of With* arguments, even contradicting ones. They -// will be applied in order of appearance and the latter will override the -// former: -// output := Run(input, WithNoExtensions(), WithExtensions(exts), -// WithRenderer(yourRenderer)) -func Run(input []byte, opts ...Option) []byte { - r := NewHTMLRenderer(HTMLRendererParameters{ - Flags: CommonHTMLFlags, - }) - optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} - optList = append(optList, opts...) - parser := New(optList...) - ast := parser.Parse(input) - var buf bytes.Buffer - parser.renderer.RenderHeader(&buf, ast) - ast.Walk(func(node *Node, entering bool) WalkStatus { - return parser.renderer.RenderNode(&buf, node, entering) - }) - parser.renderer.RenderFooter(&buf, ast) - return buf.Bytes() -} - -// Parse is an entry point to the parsing part of Blackfriday. It takes an -// input markdown document and produces a syntax tree for its contents. This -// tree can then be rendered with a default or custom renderer, or -// analyzed/transformed by the caller to whatever non-standard needs they have. -// The return value is the root node of the syntax tree. -func (p *Markdown) Parse(input []byte) *Node { - p.block(input) - // Walk the tree and finish up some of unfinished blocks - for p.tip != nil { - p.finalize(p.tip) - } - // Walk the tree again and process inline markdown in each block - p.doc.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) - p.parseRefsToAST() - return p.doc -} - -func (p *Markdown) parseRefsToAST() { - if p.extensions&Footnotes == 0 || len(p.notes) == 0 { - return - } - p.tip = p.doc - block := p.addBlock(List, nil) - block.IsFootnotesList = true - block.ListFlags = ListTypeOrdered - flags := ListItemBeginningOfList - // Note: this loop is intentionally explicit, not range-form. This is - // because the body of the loop will append nested footnotes to p.notes and - // we need to process those late additions. Range form would only walk over - // the fixed initial set. - for i := 0; i < len(p.notes); i++ { - ref := p.notes[i] - p.addExistingChild(ref.footnote, 0) - block := ref.footnote - block.ListFlags = flags | ListTypeOrdered - block.RefLink = ref.link - if ref.hasBlock { - flags |= ListItemContainsBlock - p.block(ref.title) - } else { - p.inline(block, ref.title) - } - flags &^= ListItemBeginningOfList | ListItemContainsBlock - } - above := block.Parent - finalizeList(block) - p.tip = above - block.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) -} - -// -// Link references -// -// This section implements support for references that (usually) appear -// as footnotes in a document, and can be referenced anywhere in the document. 
-// The basic format is: -// -// [1]: http://www.google.com/ "Google" -// [2]: http://www.github.com/ "Github" -// -// Anywhere in the document, the reference can be linked by referring to its -// label, i.e., 1 and 2 in this example, as in: -// -// This library is hosted on [Github][2], a git hosting site. -// -// Actual footnotes as specified in Pandoc and supported by some other Markdown -// libraries such as php-markdown are also taken care of. They look like this: -// -// This sentence needs a bit of further explanation.[^note] -// -// [^note]: This is the explanation. -// -// Footnotes should be placed at the end of the document in an ordered list. -// Finally, there are inline footnotes such as: -// -// Inline footnotes^[Also supported.] provide a quick inline explanation, -// but are rendered at the bottom of the document. -// - -// reference holds all information necessary for a reference-style links or -// footnotes. -// -// Consider this markdown with reference-style links: -// -// [link][ref] -// -// [ref]: /url/ "tooltip title" -// -// It will be ultimately converted to this HTML: -// -//
-// <p>
-//     <a href="/url/" title="tooltip title">link</a>
-// </p>
    -// -// And a reference structure will be populated as follows: -// -// p.refs["ref"] = &reference{ -// link: "/url/", -// title: "tooltip title", -// } -// -// Alternatively, reference can contain information about a footnote. Consider -// this markdown: -// -// Text needing a footnote.[^a] -// -// [^a]: This is the note -// -// A reference structure will be populated as follows: -// -// p.refs["a"] = &reference{ -// link: "a", -// title: "This is the note", -// noteID: , -// } -// -// TODO: As you can see, it begs for splitting into two dedicated structures -// for refs and for footnotes. -type reference struct { - link []byte - title []byte - noteID int // 0 if not a footnote ref - hasBlock bool - footnote *Node // a link to the Item node within a list of footnotes - - text []byte // only gets populated by refOverride feature with Reference.Text -} - -func (r *reference) String() string { - return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", - r.link, r.title, r.text, r.noteID, r.hasBlock) -} - -// Check whether or not data starts with a reference link. -// If so, it is parsed and stored in the list of references -// (in the render struct). -// Returns the number of bytes to skip to move past it, -// or zero if the first line is not a reference. -func isReference(p *Markdown, data []byte, tabSize int) int { - // up to 3 optional leading spaces - if len(data) < 4 { - return 0 - } - i := 0 - for i < 3 && data[i] == ' ' { - i++ - } - - noteID := 0 - - // id part: anything but a newline between brackets - if data[i] != '[' { - return 0 - } - i++ - if p.extensions&Footnotes != 0 { - if i < len(data) && data[i] == '^' { - // we can set it to anything here because the proper noteIds will - // be assigned later during the second pass. It just has to be != 0 - noteID = 1 - i++ - } - } - idOffset := i - for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { - i++ - } - if i >= len(data) || data[i] != ']' { - return 0 - } - idEnd := i - // footnotes can have empty ID, like this: [^], but a reference can not be - // empty like this: []. Break early if it's not a footnote and there's no ID - if noteID == 0 && idOffset == idEnd { - return 0 - } - // spacer: colon (space | tab)* newline? 
(space | tab)* - i++ - if i >= len(data) || data[i] != ':' { - return 0 - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && (data[i] == '\n' || data[i] == '\r') { - i++ - if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { - i++ - } - } - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i >= len(data) { - return 0 - } - - var ( - linkOffset, linkEnd int - titleOffset, titleEnd int - lineEnd int - raw []byte - hasBlock bool - ) - - if p.extensions&Footnotes != 0 && noteID != 0 { - linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) - lineEnd = linkEnd - } else { - linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) - } - if lineEnd == 0 { - return 0 - } - - // a valid ref has been found - - ref := &reference{ - noteID: noteID, - hasBlock: hasBlock, - } - - if noteID > 0 { - // reusing the link field for the id since footnotes don't have links - ref.link = data[idOffset:idEnd] - // if footnote, it's not really a title, it's the contained text - ref.title = raw - } else { - ref.link = data[linkOffset:linkEnd] - ref.title = data[titleOffset:titleEnd] - } - - // id matches are case-insensitive - id := string(bytes.ToLower(data[idOffset:idEnd])) - - p.refs[id] = ref - - return lineEnd -} - -func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { - // link: whitespace-free sequence, optionally between angle brackets - if data[i] == '<' { - i++ - } - linkOffset = i - for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { - i++ - } - linkEnd = i - if data[linkOffset] == '<' && data[linkEnd-1] == '>' { - linkOffset++ - linkEnd-- - } - - // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { - return - } - - // compute end-of-line - if i >= len(data) || data[i] == '\r' || data[i] == '\n' { - lineEnd = i - } - if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { - lineEnd++ - } - - // optional (space|tab)* spacer after a newline - if lineEnd > 0 { - i = lineEnd + 1 - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - } - - // optional title: any non-newline sequence enclosed in '"() alone on its line - if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { - i++ - titleOffset = i - - // look for EOL - for i < len(data) && data[i] != '\n' && data[i] != '\r' { - i++ - } - if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { - titleEnd = i + 1 - } else { - titleEnd = i - } - - // step back - i-- - for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { - i-- - } - if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { - lineEnd = titleEnd - titleEnd = i - } - } - - return -} - -// The first bit of this logic is the same as Parser.listItem, but the rest -// is much simpler. This function simply finds the entire block and shifts it -// over by one tab if it is indeed a block (just returns the line if it's not). -// blockEnd is the end of the section in the input buffer, and contents is the -// extracted text that was shifted over one tab. It will need to be rendered at -// the end of the document. 
-func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { - if i == 0 || len(data) == 0 { - return - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - blockStart = i - - // find the end of the line - blockEnd = i - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[blockEnd:i]) - blockEnd = i - - // process the following lines - containsBlankLine := false - -gatherLines: - for blockEnd < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[blockEnd:i]) > 0 { - containsBlankLine = true - blockEnd = i - continue - } - - n := 0 - if n = isIndented(data[blockEnd:i], indentSize); n == 0 { - // this is the end of the block. - // we don't want to include this last line in the index. - break gatherLines - } - - // if there were blank lines before this one, insert a new one now - if containsBlankLine { - raw.WriteByte('\n') - containsBlankLine = false - } - - // get rid of that first tab, write to buffer - raw.Write(data[blockEnd+n : i]) - hasBlock = true - - blockEnd = i - } - - if data[blockEnd-1] != '\n' { - raw.WriteByte('\n') - } - - contents = raw.Bytes() - - return -} - -// -// -// Miscellaneous helper functions -// -// - -// Test if a character is a punctuation symbol. -// Taken from a private function in regexp in the stdlib. -func ispunct(c byte) bool { - for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { - if c == r { - return true - } - } - return false -} - -// Test if a character is a whitespace character. -func isspace(c byte) bool { - return ishorizontalspace(c) || isverticalspace(c) -} - -// Test if a character is a horizontal whitespace character. -func ishorizontalspace(c byte) bool { - return c == ' ' || c == '\t' -} - -// Test if a character is a vertical character. -func isverticalspace(c byte) bool { - return c == '\n' || c == '\r' || c == '\f' || c == '\v' -} - -// Test if a character is letter. -func isletter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// Test if a character is a letter or a digit. -// TODO: check when this is looking for ASCII alnum and when it should use unicode -func isalnum(c byte) bool { - return (c >= '0' && c <= '9') || isletter(c) -} - -// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
-// always ends output with a newline -func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { - // first, check for common cases: no tabs, or only tabs at beginning of line - i, prefix := 0, 0 - slowcase := false - for i = 0; i < len(line); i++ { - if line[i] == '\t' { - if prefix == i { - prefix++ - } else { - slowcase = true - break - } - } - } - - // no need to decode runes if all tabs are at the beginning of the line - if !slowcase { - for i = 0; i < prefix*tabSize; i++ { - out.WriteByte(' ') - } - out.Write(line[prefix:]) - return - } - - // the slow case: we need to count runes to figure out how - // many spaces to insert for each tab - column := 0 - i = 0 - for i < len(line) { - start := i - for i < len(line) && line[i] != '\t' { - _, size := utf8.DecodeRune(line[i:]) - i += size - column++ - } - - if i > start { - out.Write(line[start:i]) - } - - if i >= len(line) { - break - } - - for { - out.WriteByte(' ') - column++ - if column%tabSize == 0 { - break - } - } - - i++ - } -} - -// Find if a line counts as indented or not. -// Returns number of characters the indent is (0 = not indented). -func isIndented(data []byte, indentSize int) int { - if len(data) == 0 { - return 0 - } - if data[0] == '\t' { - return 1 - } - if len(data) < indentSize { - return 0 - } - for i := 0; i < indentSize; i++ { - if data[i] != ' ' { - return 0 - } - } - return indentSize -} - -// Create a url-safe slug for fragments -func slugify(in []byte) []byte { - if len(in) == 0 { - return in - } - out := make([]byte, 0, len(in)) - sym := false - - for _, ch := range in { - if isalnum(ch) { - sym = false - out = append(out, ch) - } else if sym { - continue - } else { - out = append(out, '-') - sym = true - } - } - var a, b int - var ch byte - for a, ch = range out { - if ch != '-' { - break - } - } - for b = len(out) - 1; b > 0; b-- { - if out[b] != '-' { - break - } - } - return out[a : b+1] -} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go deleted file mode 100644 index 04e6050cee..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/node.go +++ /dev/null @@ -1,360 +0,0 @@ -package blackfriday - -import ( - "bytes" - "fmt" -) - -// NodeType specifies a type of a single node of a syntax tree. Usually one -// node (and its type) corresponds to a single markdown feature, e.g. emphasis -// or code block. -type NodeType int - -// Constants for identifying different types of nodes. See NodeType. -const ( - Document NodeType = iota - BlockQuote - List - Item - Paragraph - Heading - HorizontalRule - Emph - Strong - Del - Link - Image - Text - HTMLBlock - CodeBlock - Softbreak - Hardbreak - Code - HTMLSpan - Table - TableCell - TableHead - TableBody - TableRow -) - -var nodeTypeNames = []string{ - Document: "Document", - BlockQuote: "BlockQuote", - List: "List", - Item: "Item", - Paragraph: "Paragraph", - Heading: "Heading", - HorizontalRule: "HorizontalRule", - Emph: "Emph", - Strong: "Strong", - Del: "Del", - Link: "Link", - Image: "Image", - Text: "Text", - HTMLBlock: "HTMLBlock", - CodeBlock: "CodeBlock", - Softbreak: "Softbreak", - Hardbreak: "Hardbreak", - Code: "Code", - HTMLSpan: "HTMLSpan", - Table: "Table", - TableCell: "TableCell", - TableHead: "TableHead", - TableBody: "TableBody", - TableRow: "TableRow", -} - -func (t NodeType) String() string { - return nodeTypeNames[t] -} - -// ListData contains fields relevant to a List and Item node type. -type ListData struct { - ListFlags ListType - Tight bool // Skip
<p>
    s around list item data if true - BulletChar byte // '*', '+' or '-' in bullet lists - Delimiter byte // '.' or ')' after the number in ordered lists - RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering - IsFootnotesList bool // This is a list of footnotes -} - -// LinkData contains fields relevant to a Link node type. -type LinkData struct { - Destination []byte // Destination is what goes into a href - Title []byte // Title is the tooltip thing that goes in a title attribute - NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote - Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. -} - -// CodeBlockData contains fields relevant to a CodeBlock node type. -type CodeBlockData struct { - IsFenced bool // Specifies whether it's a fenced code block or an indented one - Info []byte // This holds the info string - FenceChar byte - FenceLength int - FenceOffset int -} - -// TableCellData contains fields relevant to a TableCell node type. -type TableCellData struct { - IsHeader bool // This tells if it's under the header row - Align CellAlignFlags // This holds the value for align attribute -} - -// HeadingData contains fields relevant to a Heading node type. -type HeadingData struct { - Level int // This holds the heading level number - HeadingID string // This might hold heading ID, if present - IsTitleblock bool // Specifies whether it's a title block -} - -// Node is a single element in the abstract syntax tree of the parsed document. -// It holds connections to the structurally neighboring nodes and, for certain -// types of nodes, additional information that might be needed when rendering. -type Node struct { - Type NodeType // Determines the type of the node - Parent *Node // Points to the parent - FirstChild *Node // Points to the first child, if any - LastChild *Node // Points to the last child, if any - Prev *Node // Previous sibling; nil if it's the first child - Next *Node // Next sibling; nil if it's the last child - - Literal []byte // Text contents of the leaf nodes - - HeadingData // Populated if Type is Heading - ListData // Populated if Type is List - CodeBlockData // Populated if Type is CodeBlock - LinkData // Populated if Type is Link - TableCellData // Populated if Type is TableCell - - content []byte // Markdown content of the block nodes - open bool // Specifies an open block node that has not been finished to process yet -} - -// NewNode allocates a node of a specified type. -func NewNode(typ NodeType) *Node { - return &Node{ - Type: typ, - open: true, - } -} - -func (n *Node) String() string { - ellipsis := "" - snippet := n.Literal - if len(snippet) > 16 { - snippet = snippet[:16] - ellipsis = "..." - } - return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) -} - -// Unlink removes node 'n' from the tree. -// It panics if the node is nil. -func (n *Node) Unlink() { - if n.Prev != nil { - n.Prev.Next = n.Next - } else if n.Parent != nil { - n.Parent.FirstChild = n.Next - } - if n.Next != nil { - n.Next.Prev = n.Prev - } else if n.Parent != nil { - n.Parent.LastChild = n.Prev - } - n.Parent = nil - n.Next = nil - n.Prev = nil -} - -// AppendChild adds a node 'child' as a child of 'n'. -// It panics if either node is nil. 
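A minimal sketch of building a small tree by hand with NewNode and AppendChild, as described in the comment above (the node contents are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Assemble a tiny document -> paragraph -> text tree.
	doc := blackfriday.NewNode(blackfriday.Document)
	para := blackfriday.NewNode(blackfriday.Paragraph)
	txt := blackfriday.NewNode(blackfriday.Text)
	txt.Literal = []byte("hello")

	// AppendChild unlinks the child from any previous parent before re-parenting it.
	doc.AppendChild(para)
	para.AppendChild(txt)

	fmt.Println(doc.FirstChild.FirstChild) // prints the Text node: "Text: 'hello'"
}
```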
-func (n *Node) AppendChild(child *Node) { - child.Unlink() - child.Parent = n - if n.LastChild != nil { - n.LastChild.Next = child - child.Prev = n.LastChild - n.LastChild = child - } else { - n.FirstChild = child - n.LastChild = child - } -} - -// InsertBefore inserts 'sibling' immediately before 'n'. -// It panics if either node is nil. -func (n *Node) InsertBefore(sibling *Node) { - sibling.Unlink() - sibling.Prev = n.Prev - if sibling.Prev != nil { - sibling.Prev.Next = sibling - } - sibling.Next = n - n.Prev = sibling - sibling.Parent = n.Parent - if sibling.Prev == nil { - sibling.Parent.FirstChild = sibling - } -} - -// IsContainer returns true if 'n' can contain children. -func (n *Node) IsContainer() bool { - switch n.Type { - case Document: - fallthrough - case BlockQuote: - fallthrough - case List: - fallthrough - case Item: - fallthrough - case Paragraph: - fallthrough - case Heading: - fallthrough - case Emph: - fallthrough - case Strong: - fallthrough - case Del: - fallthrough - case Link: - fallthrough - case Image: - fallthrough - case Table: - fallthrough - case TableHead: - fallthrough - case TableBody: - fallthrough - case TableRow: - fallthrough - case TableCell: - return true - default: - return false - } -} - -// IsLeaf returns true if 'n' is a leaf node. -func (n *Node) IsLeaf() bool { - return !n.IsContainer() -} - -func (n *Node) canContain(t NodeType) bool { - if n.Type == List { - return t == Item - } - if n.Type == Document || n.Type == BlockQuote || n.Type == Item { - return t != Item - } - if n.Type == Table { - return t == TableHead || t == TableBody - } - if n.Type == TableHead || n.Type == TableBody { - return t == TableRow - } - if n.Type == TableRow { - return t == TableCell - } - return false -} - -// WalkStatus allows NodeVisitor to have some control over the tree traversal. -// It is returned from NodeVisitor and different values allow Node.Walk to -// decide which node to go to next. -type WalkStatus int - -const ( - // GoToNext is the default traversal of every node. - GoToNext WalkStatus = iota - // SkipChildren tells walker to skip all children of current node. - SkipChildren - // Terminate tells walker to terminate the traversal. - Terminate -) - -// NodeVisitor is a callback to be called when traversing the syntax tree. -// Called twice for every node: once with entering=true when the branch is -// first visited, then with entering=false after all the children are done. -type NodeVisitor func(node *Node, entering bool) WalkStatus - -// Walk is a convenience method that instantiates a walker and starts a -// traversal of subtree rooted at n. 
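A minimal sketch of the Walk traversal described above, parsing a hypothetical markdown snippet and printing every Text node on entry:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse([]byte("# Title\n\nSome *emphasised* text.\n"))

	// Each node is visited on entry and again on exit; act only on entry here.
	ast.Walk(func(n *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && n.Type == blackfriday.Text {
			fmt.Printf("text: %q\n", n.Literal)
		}
		return blackfriday.GoToNext
	})
}
```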
-func (n *Node) Walk(visitor NodeVisitor) { - w := newNodeWalker(n) - for w.current != nil { - status := visitor(w.current, w.entering) - switch status { - case GoToNext: - w.next() - case SkipChildren: - w.entering = false - w.next() - case Terminate: - return - } - } -} - -type nodeWalker struct { - current *Node - root *Node - entering bool -} - -func newNodeWalker(root *Node) *nodeWalker { - return &nodeWalker{ - current: root, - root: root, - entering: true, - } -} - -func (nw *nodeWalker) next() { - if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { - nw.current = nil - return - } - if nw.entering && nw.current.IsContainer() { - if nw.current.FirstChild != nil { - nw.current = nw.current.FirstChild - nw.entering = true - } else { - nw.entering = false - } - } else if nw.current.Next == nil { - nw.current = nw.current.Parent - nw.entering = false - } else { - nw.current = nw.current.Next - nw.entering = true - } -} - -func dump(ast *Node) { - fmt.Println(dumpString(ast)) -} - -func dumpR(ast *Node, depth int) string { - if ast == nil { - return "" - } - indent := bytes.Repeat([]byte("\t"), depth) - content := ast.Literal - if content == nil { - content = ast.content - } - result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) - for n := ast.FirstChild; n != nil; n = n.Next { - result += dumpR(n, depth+1) - } - return result -} - -func dumpString(ast *Node) string { - return dumpR(ast, 0) -} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go deleted file mode 100644 index 3a220e9424..0000000000 --- a/vendor/github.com/russross/blackfriday/v2/smartypants.go +++ /dev/null @@ -1,457 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// SmartyPants rendering -// -// - -package blackfriday - -import ( - "bytes" - "io" -) - -// SPRenderer is a struct containing state of a Smartypants renderer. -type SPRenderer struct { - inSingleQuote bool - inDoubleQuote bool - callbacks [256]smartCallback -} - -func wordBoundary(c byte) bool { - return c == 0 || isspace(c) || ispunct(c) -} - -func tolower(c byte) byte { - if c >= 'A' && c <= 'Z' { - return c - 'A' + 'a' - } - return c -} - -func isdigit(c byte) bool { - return c >= '0' && c <= '9' -} - -func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { - // edge of the buffer is likely to be a tag that we don't get to see, - // so we treat it like text sometimes - - // enumerate all sixteen possibilities for (previousChar, nextChar) - // each can be one of {0, space, punct, other} - switch { - case previousChar == 0 && nextChar == 0: - // context is not any help here, so toggle - *isOpen = !*isOpen - case isspace(previousChar) && nextChar == 0: - // [ "] might be [ "foo...] - *isOpen = true - case ispunct(previousChar) && nextChar == 0: - // [!"] hmm... could be [Run!"] or [("...] - *isOpen = false - case /* isnormal(previousChar) && */ nextChar == 0: - // [a"] is probably a close - *isOpen = false - case previousChar == 0 && isspace(nextChar): - // [" ] might be [...foo" ] - *isOpen = false - case isspace(previousChar) && isspace(nextChar): - // [ " ] context is not any help here, so toggle - *isOpen = !*isOpen - case ispunct(previousChar) && isspace(nextChar): - // [!" 
] is probably a close - *isOpen = false - case /* isnormal(previousChar) && */ isspace(nextChar): - // [a" ] this is one of the easy cases - *isOpen = false - case previousChar == 0 && ispunct(nextChar): - // ["!] hmm... could be ["$1.95] or ["!...] - *isOpen = false - case isspace(previousChar) && ispunct(nextChar): - // [ "!] looks more like [ "$1.95] - *isOpen = true - case ispunct(previousChar) && ispunct(nextChar): - // [!"!] context is not any help here, so toggle - *isOpen = !*isOpen - case /* isnormal(previousChar) && */ ispunct(nextChar): - // [a"!] is probably a close - *isOpen = false - case previousChar == 0 /* && isnormal(nextChar) */ : - // ["a] is probably an open - *isOpen = true - case isspace(previousChar) /* && isnormal(nextChar) */ : - // [ "a] this is one of the easy cases - *isOpen = true - case ispunct(previousChar) /* && isnormal(nextChar) */ : - // [!"a] is probably an open - *isOpen = true - default: - // [a'b] maybe a contraction? - *isOpen = false - } - - // Note that with the limited lookahead, this non-breaking - // space will also be appended to single double quotes. - if addNBSP && !*isOpen { - out.WriteString(" ") - } - - out.WriteByte('&') - if *isOpen { - out.WriteByte('l') - } else { - out.WriteByte('r') - } - out.WriteByte(quote) - out.WriteString("quo;") - - if addNBSP && *isOpen { - out.WriteString(" ") - } - - return true -} - -func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - t1 := tolower(text[1]) - - if t1 == '\'' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { - out.WriteString("’") - return 0 - } - - if len(text) >= 3 { - t2 := tolower(text[2]) - - if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && - (len(text) < 4 || wordBoundary(text[3])) { - out.WriteString("’") - return 0 - } - } - } - - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { - return 0 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 { - t1 := tolower(text[1]) - t2 := tolower(text[2]) - - if t1 == 'c' && t2 == ')' { - out.WriteString("©") - return 2 - } - - if t1 == 'r' && t2 == ')' { - out.WriteString("®") - return 2 - } - - if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { - out.WriteString("™") - return 3 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - if text[1] == '-' { - out.WriteString("—") - return 1 - } - - if wordBoundary(previousChar) && wordBoundary(text[1]) { - out.WriteString("–") - return 0 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '-' && text[2] == '-' { - out.WriteString("—") - return 2 - } - if len(text) >= 2 && text[1] == '-' { - out.WriteString("–") - return 1 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { - if bytes.HasPrefix(text, []byte(""")) 
{ - nextChar := byte(0) - if len(text) >= 7 { - nextChar = text[6] - } - if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { - return 5 - } - } - - if bytes.HasPrefix(text, []byte("�")) { - return 3 - } - - out.WriteByte('&') - return 0 -} - -func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { - var quote byte = 'd' - if angledQuotes { - quote = 'a' - } - - return func(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) - } -} - -func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '.' && text[2] == '.' { - out.WriteString("…") - return 2 - } - - if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { - out.WriteString("…") - return 4 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 && text[1] == '`' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b - // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) - // and avoid changing dates like 1/23/2005 into fractions. - numEnd := 0 - for len(text) > numEnd && isdigit(text[numEnd]) { - numEnd++ - } - if numEnd == 0 { - out.WriteByte(text[0]) - return 0 - } - denStart := numEnd + 1 - if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { - denStart = numEnd + 3 - } else if len(text) < numEnd+2 || text[numEnd] != '/' { - out.WriteByte(text[0]) - return 0 - } - denEnd := denStart - for len(text) > denEnd && isdigit(text[denEnd]) { - denEnd++ - } - if denEnd == denStart { - out.WriteByte(text[0]) - return 0 - } - if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { - out.WriteString("") - out.Write(text[:numEnd]) - out.WriteString("") - out.Write(text[denStart:denEnd]) - out.WriteString("") - return denEnd - 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - if text[0] == '1' && text[1] == '/' && text[2] == '2' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { - out.WriteString("½") - return 2 - } - } - - if text[0] == '1' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { - out.WriteString("¼") - return 2 - } - } - - if text[0] == '3' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { - out.WriteString("¾") - return 2 - } - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { - nextChar := byte(0) - if len(text) > 
1 { - nextChar = text[1] - } - if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { - out.WriteString(""") - } - - return 0 -} - -func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') -} - -func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') -} - -func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { - i := 0 - - for i < len(text) && text[i] != '>' { - i++ - } - - out.Write(text[:i+1]) - return i -} - -type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int - -// NewSmartypantsRenderer constructs a Smartypants renderer object. -func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { - var ( - r SPRenderer - - smartAmpAngled = r.smartAmp(true, false) - smartAmpAngledNBSP = r.smartAmp(true, true) - smartAmpRegular = r.smartAmp(false, false) - smartAmpRegularNBSP = r.smartAmp(false, true) - - addNBSP = flags&SmartypantsQuotesNBSP != 0 - ) - - if flags&SmartypantsAngledQuotes == 0 { - r.callbacks['"'] = r.smartDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpRegular - } else { - r.callbacks['&'] = smartAmpRegularNBSP - } - } else { - r.callbacks['"'] = r.smartAngledDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpAngled - } else { - r.callbacks['&'] = smartAmpAngledNBSP - } - } - r.callbacks['\''] = r.smartSingleQuote - r.callbacks['('] = r.smartParens - if flags&SmartypantsDashes != 0 { - if flags&SmartypantsLatexDashes == 0 { - r.callbacks['-'] = r.smartDash - } else { - r.callbacks['-'] = r.smartDashLatex - } - } - r.callbacks['.'] = r.smartPeriod - if flags&SmartypantsFractions == 0 { - r.callbacks['1'] = r.smartNumber - r.callbacks['3'] = r.smartNumber - } else { - for ch := '1'; ch <= '9'; ch++ { - r.callbacks[ch] = r.smartNumberGeneric - } - } - r.callbacks['<'] = r.smartLeftAngle - r.callbacks['`'] = r.smartBacktick - return &r -} - -// Process is the entry point of the Smartypants renderer. -func (r *SPRenderer) Process(w io.Writer, text []byte) { - mark := 0 - for i := 0; i < len(text); i++ { - if action := r.callbacks[text[i]]; action != nil { - if i > mark { - w.Write(text[mark:i]) - } - previousChar := byte(0) - if i > 0 { - previousChar = text[i-1] - } - var tmp bytes.Buffer - i += action(&tmp, previousChar, text[i:]) - w.Write(tmp.Bytes()) - mark = i + 1 - } - } - if mark < len(text) { - w.Write(text[mark:]) - } -} diff --git a/vendor/github.com/spf13/cobra/doc/README.md b/vendor/github.com/spf13/cobra/doc/README.md deleted file mode 100644 index 6ea4eb6623..0000000000 --- a/vendor/github.com/spf13/cobra/doc/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# Documentation generation - -- [Man page docs](./man_docs.md) -- [Markdown docs](./md_docs.md) -- [Rest docs](./rest_docs.md) -- [Yaml docs](./yaml_docs.md) - -## Options -### `DisableAutoGenTag` -You may set `cmd.DisableAutoGenTag = true` -to _entirely_ remove the auto generated string "Auto generated by spf13/cobra..." -from any documentation source. diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go deleted file mode 100644 index 916e36144d..0000000000 --- a/vendor/github.com/spf13/cobra/doc/man_docs.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2015 Red Hat Inc. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package doc - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "github.com/cpuguy83/go-md2man/v2/md2man" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -// GenManTree will generate a man page for this command and all descendants -// in the directory given. The header may be nil. This function may not work -// correctly if your command names have `-` in them. If you have `cmd` with two -// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` -// it is undefined which help output will be in the file `cmd-sub-third.1`. -func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error { - return GenManTreeFromOpts(cmd, GenManTreeOptions{ - Header: header, - Path: dir, - CommandSeparator: "-", - }) -} - -// GenManTreeFromOpts generates a man page for the command and all descendants. -// The pages are written to the opts.Path directory. -func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error { - header := opts.Header - if header == nil { - header = &GenManHeader{} - } - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - if err := GenManTreeFromOpts(c, opts); err != nil { - return err - } - } - section := "1" - if header.Section != "" { - section = header.Section - } - - separator := "_" - if opts.CommandSeparator != "" { - separator = opts.CommandSeparator - } - basename := strings.Replace(cmd.CommandPath(), " ", separator, -1) - filename := filepath.Join(opts.Path, basename+"."+section) - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - headerCopy := *header - return GenMan(cmd, &headerCopy, f) -} - -// GenManTreeOptions is the options for generating the man pages. -// Used only in GenManTreeFromOpts. -type GenManTreeOptions struct { - Header *GenManHeader - Path string - CommandSeparator string -} - -// GenManHeader is a lot like the .TH header at the start of man pages. These -// include the title, section, date, source, and manual. We will use the -// current time if Date is unset and will use "Auto generated by spf13/cobra" -// if the Source is unset. -type GenManHeader struct { - Title string - Section string - Date *time.Time - date string - Source string - Manual string -} - -// GenMan will generate a man page for the given command and write it to -// w. The header argument may be nil, however obviously w may not. 
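A minimal sketch of calling GenMan directly, per the comment above (the command and header values are hypothetical; any io.Writer works as the destination):

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	// Hypothetical command; any *cobra.Command can be rendered.
	cmd := &cobra.Command{Use: "example", Short: "a demo command"}
	header := &doc.GenManHeader{Title: "EXAMPLE", Section: "1"}

	// Render a single man page to stdout.
	if err := doc.GenMan(cmd, header, os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```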
-func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error { - if header == nil { - header = &GenManHeader{} - } - if err := fillHeader(header, cmd.CommandPath(), cmd.DisableAutoGenTag); err != nil { - return err - } - - b := genMan(cmd, header) - _, err := w.Write(md2man.Render(b)) - return err -} - -func fillHeader(header *GenManHeader, name string, disableAutoGen bool) error { - if header.Title == "" { - header.Title = strings.ToUpper(strings.Replace(name, " ", "\\-", -1)) - } - if header.Section == "" { - header.Section = "1" - } - if header.Date == nil { - now := time.Now() - if epoch := os.Getenv("SOURCE_DATE_EPOCH"); epoch != "" { - unixEpoch, err := strconv.ParseInt(epoch, 10, 64) - if err != nil { - return fmt.Errorf("invalid SOURCE_DATE_EPOCH: %v", err) - } - now = time.Unix(unixEpoch, 0) - } - header.Date = &now - } - header.date = (*header.Date).Format("Jan 2006") - if header.Source == "" && !disableAutoGen { - header.Source = "Auto generated by spf13/cobra" - } - return nil -} - -func manPreamble(buf io.StringWriter, header *GenManHeader, cmd *cobra.Command, dashedName string) { - description := cmd.Long - if len(description) == 0 { - description = cmd.Short - } - - cobra.WriteStringAndCheck(buf, fmt.Sprintf(`%% "%s" "%s" "%s" "%s" "%s" -# NAME -`, header.Title, header.Section, header.date, header.Source, header.Manual)) - cobra.WriteStringAndCheck(buf, fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short)) - cobra.WriteStringAndCheck(buf, "# SYNOPSIS\n") - cobra.WriteStringAndCheck(buf, fmt.Sprintf("**%s**\n\n", cmd.UseLine())) - cobra.WriteStringAndCheck(buf, "# DESCRIPTION\n") - cobra.WriteStringAndCheck(buf, description+"\n\n") -} - -func manPrintFlags(buf io.StringWriter, flags *pflag.FlagSet) { - flags.VisitAll(func(flag *pflag.Flag) { - if len(flag.Deprecated) > 0 || flag.Hidden { - return - } - format := "" - if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { - format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name) - } else { - format = fmt.Sprintf("**--%s**", flag.Name) - } - if len(flag.NoOptDefVal) > 0 { - format += "[" - } - if flag.Value.Type() == "string" { - // put quotes on the value - format += "=%q" - } else { - format += "=%s" - } - if len(flag.NoOptDefVal) > 0 { - format += "]" - } - format += "\n\t%s\n\n" - cobra.WriteStringAndCheck(buf, fmt.Sprintf(format, flag.DefValue, flag.Usage)) - }) -} - -func manPrintOptions(buf io.StringWriter, command *cobra.Command) { - flags := command.NonInheritedFlags() - if flags.HasAvailableFlags() { - cobra.WriteStringAndCheck(buf, "# OPTIONS\n") - manPrintFlags(buf, flags) - cobra.WriteStringAndCheck(buf, "\n") - } - flags = command.InheritedFlags() - if flags.HasAvailableFlags() { - cobra.WriteStringAndCheck(buf, "# OPTIONS INHERITED FROM PARENT COMMANDS\n") - manPrintFlags(buf, flags) - cobra.WriteStringAndCheck(buf, "\n") - } -} - -func genMan(cmd *cobra.Command, header *GenManHeader) []byte { - cmd.InitDefaultHelpCmd() - cmd.InitDefaultHelpFlag() - - // something like `rootcmd-subcmd1-subcmd2` - dashCommandName := strings.Replace(cmd.CommandPath(), " ", "-", -1) - - buf := new(bytes.Buffer) - - manPreamble(buf, header, cmd, dashCommandName) - manPrintOptions(buf, cmd) - if len(cmd.Example) > 0 { - buf.WriteString("# EXAMPLE\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example)) - } - if hasSeeAlso(cmd) { - buf.WriteString("# SEE ALSO\n") - seealsos := make([]string, 0) - if cmd.HasParent() { - parentPath := cmd.Parent().CommandPath() - dashParentPath := 
strings.Replace(parentPath, " ", "-", -1) - seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section) - seealsos = append(seealsos, seealso) - cmd.VisitParents(func(c *cobra.Command) { - if c.DisableAutoGenTag { - cmd.DisableAutoGenTag = c.DisableAutoGenTag - } - }) - } - children := cmd.Commands() - sort.Sort(byName(children)) - for _, c := range children { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section) - seealsos = append(seealsos, seealso) - } - buf.WriteString(strings.Join(seealsos, ", ") + "\n") - } - if !cmd.DisableAutoGenTag { - buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))) - } - return buf.Bytes() -} diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.md b/vendor/github.com/spf13/cobra/doc/man_docs.md deleted file mode 100644 index 3709160f34..0000000000 --- a/vendor/github.com/spf13/cobra/doc/man_docs.md +++ /dev/null @@ -1,31 +0,0 @@ -# Generating Man Pages For Your Own cobra.Command - -Generating man pages from a cobra command is incredibly easy. An example is as follows: - -```go -package main - -import ( - "log" - - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" -) - -func main() { - cmd := &cobra.Command{ - Use: "test", - Short: "my test program", - } - header := &doc.GenManHeader{ - Title: "MINE", - Section: "3", - } - err := doc.GenManTree(cmd, header, "/tmp") - if err != nil { - log.Fatal(err) - } -} -``` - -That will get you a man page `/tmp/test.3` diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go deleted file mode 100644 index 5181af8dc2..0000000000 --- a/vendor/github.com/spf13/cobra/doc/md_docs.go +++ /dev/null @@ -1,155 +0,0 @@ -//Copyright 2015 Red Hat Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package doc - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/spf13/cobra" -) - -func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error { - flags := cmd.NonInheritedFlags() - flags.SetOutput(buf) - if flags.HasAvailableFlags() { - buf.WriteString("### Options\n\n```\n") - flags.PrintDefaults() - buf.WriteString("```\n\n") - } - - parentFlags := cmd.InheritedFlags() - parentFlags.SetOutput(buf) - if parentFlags.HasAvailableFlags() { - buf.WriteString("### Options inherited from parent commands\n\n```\n") - parentFlags.PrintDefaults() - buf.WriteString("```\n\n") - } - return nil -} - -// GenMarkdown creates markdown output. -func GenMarkdown(cmd *cobra.Command, w io.Writer) error { - return GenMarkdownCustom(cmd, w, func(s string) string { return s }) -} - -// GenMarkdownCustom creates custom markdown output. 
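A minimal sketch of GenMarkdownCustom with a custom link handler (the command and the "/commands/" URL prefix are hypothetical choices):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	cmd := &cobra.Command{Use: "example", Short: "a demo command"}

	// linkHandler rewrites the generated cross-links between command pages.
	linkHandler := func(name string) string { return "/commands/" + name }

	var buf bytes.Buffer
	if err := doc.GenMarkdownCustom(cmd, &buf, linkHandler); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.String())
}
```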
-func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { - cmd.InitDefaultHelpCmd() - cmd.InitDefaultHelpFlag() - - buf := new(bytes.Buffer) - name := cmd.CommandPath() - - buf.WriteString("## " + name + "\n\n") - buf.WriteString(cmd.Short + "\n\n") - if len(cmd.Long) > 0 { - buf.WriteString("### Synopsis\n\n") - buf.WriteString(cmd.Long + "\n\n") - } - - if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) - } - - if len(cmd.Example) > 0 { - buf.WriteString("### Examples\n\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) - } - - if err := printOptions(buf, cmd, name); err != nil { - return err - } - if hasSeeAlso(cmd) { - buf.WriteString("### SEE ALSO\n\n") - if cmd.HasParent() { - parent := cmd.Parent() - pname := parent.CommandPath() - link := pname + ".md" - link = strings.Replace(link, " ", "_", -1) - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) - cmd.VisitParents(func(c *cobra.Command) { - if c.DisableAutoGenTag { - cmd.DisableAutoGenTag = c.DisableAutoGenTag - } - }) - } - - children := cmd.Commands() - sort.Sort(byName(children)) - - for _, child := range children { - if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { - continue - } - cname := name + " " + child.Name() - link := cname + ".md" - link = strings.Replace(link, " ", "_", -1) - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) - } - buf.WriteString("\n") - } - if !cmd.DisableAutoGenTag { - buf.WriteString("###### Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "\n") - } - _, err := buf.WriteTo(w) - return err -} - -// GenMarkdownTree will generate a markdown page for this command and all -// descendants in the directory given. The header may be nil. -// This function may not work correctly if your command names have `-` in them. -// If you have `cmd` with two subcmds, `sub` and `sub-third`, -// and `sub` has a subcommand called `third`, it is undefined which -// help output will be in the file `cmd-sub-third.1`. -func GenMarkdownTree(cmd *cobra.Command, dir string) error { - identity := func(s string) string { return s } - emptyStr := func(s string) string { return "" } - return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) -} - -// GenMarkdownTreeCustom is the the same as GenMarkdownTree, but -// with custom filePrepender and linkHandler. 
-func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil { - return err - } - } - - basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".md" - filename := filepath.Join(dir, basename) - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - if _, err := io.WriteString(f, filePrepender(filename)); err != nil { - return err - } - if err := GenMarkdownCustom(cmd, f, linkHandler); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.md b/vendor/github.com/spf13/cobra/doc/md_docs.md deleted file mode 100644 index 5c870625f7..0000000000 --- a/vendor/github.com/spf13/cobra/doc/md_docs.md +++ /dev/null @@ -1,115 +0,0 @@ -# Generating Markdown Docs For Your Own cobra.Command - -Generating Markdown pages from a cobra command is incredibly easy. An example is as follows: - -```go -package main - -import ( - "log" - - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" -) - -func main() { - cmd := &cobra.Command{ - Use: "test", - Short: "my test program", - } - err := doc.GenMarkdownTree(cmd, "/tmp") - if err != nil { - log.Fatal(err) - } -} -``` - -That will get you a Markdown document `/tmp/test.md` - -## Generate markdown docs for the entire command tree - -This program can actually generate docs for the kubectl command in the kubernetes project - -```go -package main - -import ( - "log" - "io/ioutil" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - - "github.com/spf13/cobra/doc" -) - -func main() { - kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - err := doc.GenMarkdownTree(kubectl, "./") - if err != nil { - log.Fatal(err) - } -} -``` - -This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") - -## Generate markdown docs for a single command - -You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenMarkdown` instead of `GenMarkdownTree` - -```go - out := new(bytes.Buffer) - err := doc.GenMarkdown(cmd, out) - if err != nil { - log.Fatal(err) - } -``` - -This will write the markdown doc for ONLY "cmd" into the out, buffer. - -## Customize the output - -Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output: - -```go -func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error { - //... -} -``` - -```go -func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error { - //... -} -``` - -The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. 
A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): - -```go -const fmTemplate = `--- -date: %s -title: "%s" -slug: %s -url: %s ---- -` - -filePrepender := func(filename string) string { - now := time.Now().Format(time.RFC3339) - name := filepath.Base(filename) - base := strings.TrimSuffix(name, path.Ext(name)) - url := "/commands/" + strings.ToLower(base) + "/" - return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) -} -``` - -The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: - -```go -linkHandler := func(name string) string { - base := strings.TrimSuffix(name, path.Ext(name)) - return "/commands/" + strings.ToLower(base) + "/" -} -``` diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go deleted file mode 100644 index 051d8dc832..0000000000 --- a/vendor/github.com/spf13/cobra/doc/rest_docs.go +++ /dev/null @@ -1,185 +0,0 @@ -//Copyright 2015 Red Hat Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package doc - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/spf13/cobra" -) - -func printOptionsReST(buf *bytes.Buffer, cmd *cobra.Command, name string) error { - flags := cmd.NonInheritedFlags() - flags.SetOutput(buf) - if flags.HasAvailableFlags() { - buf.WriteString("Options\n") - buf.WriteString("~~~~~~~\n\n::\n\n") - flags.PrintDefaults() - buf.WriteString("\n") - } - - parentFlags := cmd.InheritedFlags() - parentFlags.SetOutput(buf) - if parentFlags.HasAvailableFlags() { - buf.WriteString("Options inherited from parent commands\n") - buf.WriteString("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n") - parentFlags.PrintDefaults() - buf.WriteString("\n") - } - return nil -} - -// linkHandler for default ReST hyperlink markup -func defaultLinkHandler(name, ref string) string { - return fmt.Sprintf("`%s <%s.rst>`_", name, ref) -} - -// GenReST creates reStructured Text output. -func GenReST(cmd *cobra.Command, w io.Writer) error { - return GenReSTCustom(cmd, w, defaultLinkHandler) -} - -// GenReSTCustom creates custom reStructured Text output. -func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, string) string) error { - cmd.InitDefaultHelpCmd() - cmd.InitDefaultHelpFlag() - - buf := new(bytes.Buffer) - name := cmd.CommandPath() - - short := cmd.Short - long := cmd.Long - if len(long) == 0 { - long = short - } - ref := strings.Replace(name, " ", "_", -1) - - buf.WriteString(".. 
_" + ref + ":\n\n") - buf.WriteString(name + "\n") - buf.WriteString(strings.Repeat("-", len(name)) + "\n\n") - buf.WriteString(short + "\n\n") - buf.WriteString("Synopsis\n") - buf.WriteString("~~~~~~~~\n\n") - buf.WriteString("\n" + long + "\n\n") - - if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine())) - } - - if len(cmd.Example) > 0 { - buf.WriteString("Examples\n") - buf.WriteString("~~~~~~~~\n\n") - buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " "))) - } - - if err := printOptionsReST(buf, cmd, name); err != nil { - return err - } - if hasSeeAlso(cmd) { - buf.WriteString("SEE ALSO\n") - buf.WriteString("~~~~~~~~\n\n") - if cmd.HasParent() { - parent := cmd.Parent() - pname := parent.CommandPath() - ref = strings.Replace(pname, " ", "_", -1) - buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short)) - cmd.VisitParents(func(c *cobra.Command) { - if c.DisableAutoGenTag { - cmd.DisableAutoGenTag = c.DisableAutoGenTag - } - }) - } - - children := cmd.Commands() - sort.Sort(byName(children)) - - for _, child := range children { - if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { - continue - } - cname := name + " " + child.Name() - ref = strings.Replace(cname, " ", "_", -1) - buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short)) - } - buf.WriteString("\n") - } - if !cmd.DisableAutoGenTag { - buf.WriteString("*Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "*\n") - } - _, err := buf.WriteTo(w) - return err -} - -// GenReSTTree will generate a ReST page for this command and all -// descendants in the directory given. -// This function may not work correctly if your command names have `-` in them. -// If you have `cmd` with two subcmds, `sub` and `sub-third`, -// and `sub` has a subcommand called `third`, it is undefined which -// help output will be in the file `cmd-sub-third.1`. -func GenReSTTree(cmd *cobra.Command, dir string) error { - emptyStr := func(s string) string { return "" } - return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler) -} - -// GenReSTTreeCustom is the the same as GenReSTTree, but -// with custom filePrepender and linkHandler. -func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - if err := GenReSTTreeCustom(c, dir, filePrepender, linkHandler); err != nil { - return err - } - } - - basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".rst" - filename := filepath.Join(dir, basename) - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - if _, err := io.WriteString(f, filePrepender(filename)); err != nil { - return err - } - if err := GenReSTCustom(cmd, f, linkHandler); err != nil { - return err - } - return nil -} - -// adapted from: https://github.com/kr/text/blob/main/indent.go -func indentString(s, p string) string { - var res []byte - b := []byte(s) - prefix := []byte(p) - bol := true - for _, c := range b { - if bol && c != '\n' { - res = append(res, prefix...) 
- } - res = append(res, c) - bol = c == '\n' - } - return string(res) -} diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.md b/vendor/github.com/spf13/cobra/doc/rest_docs.md deleted file mode 100644 index 6098430eff..0000000000 --- a/vendor/github.com/spf13/cobra/doc/rest_docs.md +++ /dev/null @@ -1,114 +0,0 @@ -# Generating ReStructured Text Docs For Your Own cobra.Command - -Generating ReST pages from a cobra command is incredibly easy. An example is as follows: - -```go -package main - -import ( - "log" - - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" -) - -func main() { - cmd := &cobra.Command{ - Use: "test", - Short: "my test program", - } - err := doc.GenReSTTree(cmd, "/tmp") - if err != nil { - log.Fatal(err) - } -} -``` - -That will get you a ReST document `/tmp/test.rst` - -## Generate ReST docs for the entire command tree - -This program can actually generate docs for the kubectl command in the kubernetes project - -```go -package main - -import ( - "log" - "io/ioutil" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - - "github.com/spf13/cobra/doc" -) - -func main() { - kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - err := doc.GenReSTTree(kubectl, "./") - if err != nil { - log.Fatal(err) - } -} -``` - -This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") - -## Generate ReST docs for a single command - -You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenReST` instead of `GenReSTTree` - -```go - out := new(bytes.Buffer) - err := doc.GenReST(cmd, out) - if err != nil { - log.Fatal(err) - } -``` - -This will write the ReST doc for ONLY "cmd" into the out, buffer. - -## Customize the output - -Both `GenReST` and `GenReSTTree` have alternate versions with callbacks to get some control of the output: - -```go -func GenReSTTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { - //... -} -``` - -```go -func GenReSTCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string, string) string) error { - //... -} -``` - -The `filePrepender` will prepend the return value given the full filepath to the rendered ReST file. A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): - -```go -const fmTemplate = `--- -date: %s -title: "%s" -slug: %s -url: %s ---- -` -filePrepender := func(filename string) string { - now := time.Now().Format(time.RFC3339) - name := filepath.Base(filename) - base := strings.TrimSuffix(name, path.Ext(name)) - url := "/commands/" + strings.ToLower(base) + "/" - return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) -} -``` - -The `linkHandler` can be used to customize the rendered links to the commands, given a command name and reference. 
This is useful while converting rst to html or while generating documentation with tools like Sphinx where `:ref:` is used: - -```go -// Sphinx cross-referencing format -linkHandler := func(name, ref string) string { - return fmt.Sprintf(":ref:`%s <%s>`", name, ref) -} -``` diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go deleted file mode 100644 index bffde94d50..0000000000 --- a/vendor/github.com/spf13/cobra/doc/util.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 Red Hat Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package doc - -import ( - "strings" - - "github.com/spf13/cobra" -) - -// Test to see if we have a reason to print See Also information in docs -// Basically this is a test for a parent command or a subcommand which is -// both not deprecated and not the autogenerated help command. -func hasSeeAlso(cmd *cobra.Command) bool { - if cmd.HasParent() { - return true - } - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - return true - } - return false -} - -// Temporary workaround for yaml lib generating incorrect yaml with long strings -// that do not contain \n. -func forceMultiLine(s string) string { - if len(s) > 60 && !strings.Contains(s, "\n") { - s = s + "\n" - } - return s -} - -type byName []*cobra.Command - -func (s byName) Len() int { return len(s) } -func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go deleted file mode 100644 index 96e6ad721e..0000000000 --- a/vendor/github.com/spf13/cobra/doc/yaml_docs.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2016 French Ben. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package doc - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "gopkg.in/yaml.v2" -) - -type cmdOption struct { - Name string - Shorthand string `yaml:",omitempty"` - DefaultValue string `yaml:"default_value,omitempty"` - Usage string `yaml:",omitempty"` -} - -type cmdDoc struct { - Name string - Synopsis string `yaml:",omitempty"` - Description string `yaml:",omitempty"` - Usage string `yaml:",omitempty"` - Options []cmdOption `yaml:",omitempty"` - InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"` - Example string `yaml:",omitempty"` - SeeAlso []string `yaml:"see_also,omitempty"` -} - -// GenYamlTree creates yaml structured ref files for this command and all descendants -// in the directory given. This function may not work -// correctly if your command names have `-` in them. If you have `cmd` with two -// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` -// it is undefined which help output will be in the file `cmd-sub-third.1`. -func GenYamlTree(cmd *cobra.Command, dir string) error { - identity := func(s string) string { return s } - emptyStr := func(s string) string { return "" } - return GenYamlTreeCustom(cmd, dir, emptyStr, identity) -} - -// GenYamlTreeCustom creates yaml structured ref files. -func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { - continue - } - if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil { - return err - } - } - - basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml" - filename := filepath.Join(dir, basename) - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - if _, err := io.WriteString(f, filePrepender(filename)); err != nil { - return err - } - if err := GenYamlCustom(cmd, f, linkHandler); err != nil { - return err - } - return nil -} - -// GenYaml creates yaml output. -func GenYaml(cmd *cobra.Command, w io.Writer) error { - return GenYamlCustom(cmd, w, func(s string) string { return s }) -} - -// GenYamlCustom creates custom yaml output. 
-func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { - cmd.InitDefaultHelpCmd() - cmd.InitDefaultHelpFlag() - - yamlDoc := cmdDoc{} - yamlDoc.Name = cmd.CommandPath() - - yamlDoc.Synopsis = forceMultiLine(cmd.Short) - yamlDoc.Description = forceMultiLine(cmd.Long) - - if cmd.Runnable() { - yamlDoc.Usage = cmd.UseLine() - } - - if len(cmd.Example) > 0 { - yamlDoc.Example = cmd.Example - } - - flags := cmd.NonInheritedFlags() - if flags.HasFlags() { - yamlDoc.Options = genFlagResult(flags) - } - flags = cmd.InheritedFlags() - if flags.HasFlags() { - yamlDoc.InheritedOptions = genFlagResult(flags) - } - - if hasSeeAlso(cmd) { - result := []string{} - if cmd.HasParent() { - parent := cmd.Parent() - result = append(result, parent.CommandPath()+" - "+parent.Short) - } - children := cmd.Commands() - sort.Sort(byName(children)) - for _, child := range children { - if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { - continue - } - result = append(result, child.Name()+" - "+child.Short) - } - yamlDoc.SeeAlso = result - } - - final, err := yaml.Marshal(&yamlDoc) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - if _, err := w.Write(final); err != nil { - return err - } - return nil -} - -func genFlagResult(flags *pflag.FlagSet) []cmdOption { - var result []cmdOption - - flags.VisitAll(func(flag *pflag.Flag) { - // Todo, when we mark a shorthand is deprecated, but specify an empty message. - // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. - // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. - if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { - opt := cmdOption{ - flag.Name, - flag.Shorthand, - flag.DefValue, - forceMultiLine(flag.Usage), - } - result = append(result, opt) - } else { - opt := cmdOption{ - Name: flag.Name, - DefaultValue: forceMultiLine(flag.DefValue), - Usage: forceMultiLine(flag.Usage), - } - result = append(result, opt) - } - }) - - return result -} diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.md b/vendor/github.com/spf13/cobra/doc/yaml_docs.md deleted file mode 100644 index 1a9b7c6a3c..0000000000 --- a/vendor/github.com/spf13/cobra/doc/yaml_docs.md +++ /dev/null @@ -1,112 +0,0 @@ -# Generating Yaml Docs For Your Own cobra.Command - -Generating yaml files from a cobra command is incredibly easy. 
An example is as follows: - -```go -package main - -import ( - "log" - - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" -) - -func main() { - cmd := &cobra.Command{ - Use: "test", - Short: "my test program", - } - err := doc.GenYamlTree(cmd, "/tmp") - if err != nil { - log.Fatal(err) - } -} -``` - -That will get you a Yaml document `/tmp/test.yaml` - -## Generate yaml docs for the entire command tree - -This program can actually generate docs for the kubectl command in the kubernetes project - -```go -package main - -import ( - "io/ioutil" - "log" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - - "github.com/spf13/cobra/doc" -) - -func main() { - kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - err := doc.GenYamlTree(kubectl, "./") - if err != nil { - log.Fatal(err) - } -} -``` - -This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") - -## Generate yaml docs for a single command - -You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenYaml` instead of `GenYamlTree` - -```go - out := new(bytes.Buffer) - doc.GenYaml(cmd, out) -``` - -This will write the yaml doc for ONLY "cmd" into the out, buffer. - -## Customize the output - -Both `GenYaml` and `GenYamlTree` have alternate versions with callbacks to get some control of the output: - -```go -func GenYamlTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error { - //... -} -``` - -```go -func GenYamlCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error { - //... -} -``` - -The `filePrepender` will prepend the return value given the full filepath to the rendered Yaml file. 
A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): - -```go -const fmTemplate = `--- -date: %s -title: "%s" -slug: %s -url: %s ---- -` - -filePrepender := func(filename string) string { - now := time.Now().Format(time.RFC3339) - name := filepath.Base(filename) - base := strings.TrimSuffix(name, path.Ext(name)) - url := "/commands/" + strings.ToLower(base) + "/" - return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) -} -``` - -The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: - -```go -linkHandler := func(name string) string { - base := strings.TrimSuffix(name, path.Ext(name)) - return "/commands/" + strings.ToLower(base) + "/" -} -``` diff --git a/vendor/modules.txt b/vendor/modules.txt index dd245e7ccd..ffef8589bf 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -18,10 +18,6 @@ github.com/containerd/containerd/platforms # github.com/coreos/etcd v3.3.25+incompatible ## explicit github.com/coreos/etcd/raft/raftpb -# github.com/cpuguy83/go-md2man/v2 v2.0.1 -## explicit -github.com/cpuguy83/go-md2man/v2 -github.com/cpuguy83/go-md2man/v2/md2man # github.com/creack/pty v1.1.11 ## explicit github.com/creack/pty @@ -193,15 +189,12 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/russross/blackfriday/v2 v2.1.0 -github.com/russross/blackfriday/v2 # github.com/sirupsen/logrus v1.8.1 ## explicit github.com/sirupsen/logrus # github.com/spf13/cobra v1.1.3 ## explicit github.com/spf13/cobra -github.com/spf13/cobra/doc # github.com/spf13/pflag v1.0.5 ## explicit github.com/spf13/pflag