mirror of https://github.com/docker/cli.git

commit b4ef2ddb8b (parent 2d8cc3cd80)

revendor notary for updated import/export packages, update with rebase

Signed-off-by: Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
@@ -11,8 +11,8 @@ import (
 	"github.com/docker/cli/cli/trust"
 	"github.com/docker/notary"
 	"github.com/docker/notary/storage"
+	"github.com/docker/notary/trustmanager"
 	tufutils "github.com/docker/notary/tuf/utils"
-	"github.com/docker/notary/utils"
 	"github.com/spf13/cobra"
 )
 
@@ -46,7 +46,7 @@ func loadPrivKey(streams command.Streams, keyPath string, options keyLoadOptions
 	if err != nil {
 		return err
 	}
-	privKeyImporters := []utils.Importer{keyFileStore}
+	privKeyImporters := []trustmanager.Importer{keyFileStore}
 
 	fmt.Fprintf(streams.Out(), "\nLoading key from \"%s\"...\n", keyPath)
 
@@ -85,10 +85,10 @@ func getPrivKeyBytesFromPath(keyPath string) ([]byte, error) {
 	return keyBytes, nil
 }
 
-func loadPrivKeyBytesToStore(privKeyBytes []byte, privKeyImporters []utils.Importer, keyPath, keyName string, passRet notary.PassRetriever) error {
+func loadPrivKeyBytesToStore(privKeyBytes []byte, privKeyImporters []trustmanager.Importer, keyPath, keyName string, passRet notary.PassRetriever) error {
 	if _, _, err := tufutils.ExtractPrivateKeyAttributes(privKeyBytes); err != nil {
 		return fmt.Errorf("provided file %s is not a supported private key - to add a signer's public key use docker trust signer add", keyPath)
 	}
 	// Make a reader, rewind the file pointer
-	return utils.ImportKeys(bytes.NewReader(privKeyBytes), privKeyImporters, keyName, "", passRet)
+	return trustmanager.ImportKeys(bytes.NewReader(privKeyBytes), privKeyImporters, keyName, "", passRet)
 }
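For orientation, the import/export helpers that the hunks above switch to now live in notary's trustmanager package rather than its utils package. The following is a minimal, hypothetical sketch of driving that API directly; the file paths, key name, and passphrase are placeholders and are not taken from this commit.

    package main

    import (
        "bytes"
        "io/ioutil"
        "log"

        "github.com/docker/notary"
        "github.com/docker/notary/passphrase"
        "github.com/docker/notary/storage"
        "github.com/docker/notary/trustmanager"
    )

    func main() {
        // Hypothetical key file; the CLI reads its key bytes via getPrivKeyBytesFromPath.
        keyBytes, err := ioutil.ReadFile("/tmp/signer.key")
        if err != nil {
            log.Fatal(err)
        }

        // A filesystem-backed private key store; it satisfies trustmanager.Importer,
        // which is why the CLI can build []trustmanager.Importer{keyFileStore}.
        keyStore, err := storage.NewPrivateKeyFileStorage("/tmp/trust/private", notary.KeyExtension)
        if err != nil {
            log.Fatal(err)
        }
        importers := []trustmanager.Importer{keyStore}

        // Same call shape as loadPrivKeyBytesToStore above: a reader over the key
        // bytes, the importers, a fallback key name, an empty string, and a
        // passphrase retriever.
        passRet := passphrase.ConstantRetriever("example-passphrase")
        if err := trustmanager.ImportKeys(bytes.NewReader(keyBytes), importers, "my-signer", "", passRet); err != nil {
            log.Fatal(err)
        }
    }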
@@ -14,8 +14,8 @@ import (
 	"github.com/docker/notary"
 	"github.com/docker/notary/passphrase"
 	"github.com/docker/notary/storage"
+	"github.com/docker/notary/trustmanager"
 	tufutils "github.com/docker/notary/tuf/utils"
-	"github.com/docker/notary/utils"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -125,7 +125,7 @@ func testLoadKeyFromPath(t *testing.T, privKeyID string, privKeyFixture []byte)
 	cannedPasswordRetriever := passphrase.ConstantRetriever(passwd)
 	keyFileStore, err := storage.NewPrivateKeyFileStorage(keyStorageDir, notary.KeyExtension)
 	assert.NoError(t, err)
-	privKeyImporters := []utils.Importer{keyFileStore}
+	privKeyImporters := []trustmanager.Importer{keyFileStore}
 
 	// get the privKeyBytes
 	privKeyBytes, err := getPrivKeyBytesFromPath(privKeyFilepath)
@@ -226,7 +226,7 @@ func TestLoadPubKeyFailure(t *testing.T) {
 	cannedPasswordRetriever := passphrase.ConstantRetriever(passwd)
 	keyFileStore, err := storage.NewPrivateKeyFileStorage(keyStorageDir, notary.KeyExtension)
 	assert.NoError(t, err)
-	privKeyImporters := []utils.Importer{keyFileStore}
+	privKeyImporters := []trustmanager.Importer{keyFileStore}
 
 	pubKeyBytes, err := getPrivKeyBytesFromPath(pubKeyFilepath)
 	assert.NoError(t, err)
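Related to TestLoadPubKeyFailure above, the gate that rejects non-private-key input is tufutils.ExtractPrivateKeyAttributes; when it fails, the earlier hunk's error string points users at docker trust signer add instead. A small hypothetical sketch of that check in isolation (the path is a placeholder):

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        tufutils "github.com/docker/notary/tuf/utils"
    )

    func main() {
        // Placeholder path; the test feeds a public key file here to make the check fail.
        raw, err := ioutil.ReadFile("/tmp/maybe-a-key.pem")
        if err != nil {
            log.Fatal(err)
        }

        // Only supported private keys pass; everything else is rejected before any
        // import is attempted, as in loadPrivKeyBytesToStore.
        if _, _, err := tufutils.ExtractPrivateKeyAttributes(raw); err != nil {
            log.Fatalf("not a supported private key: %v", err)
        }
        fmt.Println("supported private key")
    }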
@@ -11,10 +11,9 @@ import (
 
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
+	"github.com/docker/cli/cli/command/image"
 	"github.com/docker/cli/cli/trust"
 	"github.com/docker/cli/opts"
-	"github.com/docker/docker/api/types"
-	registrytypes "github.com/docker/docker/api/types/registry"
 	"github.com/docker/notary/client"
 	"github.com/docker/notary/tuf/data"
 	tufutils "github.com/docker/notary/tuf/utils"
@@ -80,15 +79,12 @@ func addSignerToImage(cli command.Cli, signerName string, imageName string, keyP
 	fmt.Fprintf(cli.Out(), "\nAdding signer \"%s\" to %s...\n", signerName, imageName)
 
 	ctx := context.Background()
-	authResolver := func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
-		return command.ResolveAuthConfig(ctx, cli, index)
-	}
-	imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, authResolver, imageName)
+	imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(cli), imageName)
 	if err != nil {
 		return err
 	}
 
-	notaryRepo, err := cli.NotaryClient(*imgRefAndAuth, trust.ActionsPushAndPull)
+	notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
 	if err != nil {
 		return trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
 	}
@@ -8,9 +8,8 @@ import (
 
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
+	"github.com/docker/cli/cli/command/image"
 	"github.com/docker/cli/cli/trust"
-	"github.com/docker/docker/api/types"
-	registrytypes "github.com/docker/docker/api/types/registry"
 	"github.com/docker/notary/client"
 	"github.com/docker/notary/tuf/data"
 	"github.com/spf13/cobra"
@@ -75,14 +74,11 @@ func isLastSignerForReleases(roleWithSig data.Role, allRoles []client.RoleWithSi
 	return counter < releasesRoleWithSigs.Threshold, nil
 }
 
-func removeSingleSigner(cli command.Cli, image, signerName string, forceYes bool) error {
-	fmt.Fprintf(cli.Out(), "\nRemoving signer \"%s\" from %s...\n", signerName, image)
+func removeSingleSigner(cli command.Cli, imageName, signerName string, forceYes bool) error {
+	fmt.Fprintf(cli.Out(), "\nRemoving signer \"%s\" from %s...\n", signerName, imageName)
 
 	ctx := context.Background()
-	authResolver := func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
-		return command.ResolveAuthConfig(ctx, cli, index)
-	}
-	imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, authResolver, image)
+	imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(cli), imageName)
 	if err != nil {
 		return err
 	}
@@ -91,13 +87,13 @@ func removeSingleSigner(cli command.Cli, image, signerName string, forceYes bool
 	if signerDelegation == releasesRoleTUFName {
 		return fmt.Errorf("releases is a reserved keyword and cannot be removed")
 	}
-	notaryRepo, err := cli.NotaryClient(*imgRefAndAuth, trust.ActionsPushAndPull)
+	notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
 	if err != nil {
 		return trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
 	}
 	delegationRoles, err := notaryRepo.GetDelegationRoles()
 	if err != nil {
-		return fmt.Errorf("Error retrieving signers for %s", image)
+		return fmt.Errorf("Error retrieving signers for %s", imageName)
 	}
 	var role data.Role
 	for _, delRole := range delegationRoles {
@@ -107,7 +103,7 @@ func removeSingleSigner(cli command.Cli, image, signerName string, forceYes bool
 		}
 	}
 	if role.Name == "" {
-		return fmt.Errorf("No signer %s for image %s", signerName, image)
+		return fmt.Errorf("No signer %s for image %s", signerName, imageName)
 	}
 	allRoles, err := notaryRepo.ListRoles()
 	if err != nil {
@@ -117,7 +113,7 @@ func removeSingleSigner(cli command.Cli, image, signerName string, forceYes bool
 	removeSigner := command.PromptForConfirmation(os.Stdin, cli.Out(), fmt.Sprintf("The signer \"%s\" signed the last released version of %s. "+
 		"Removing this signer will make %s unpullable. "+
 		"Are you sure you want to continue?",
-		signerName, image, image,
+		signerName, imageName, imageName,
 	))
 
 	if !removeSigner {
@@ -136,6 +132,6 @@ func removeSingleSigner(cli command.Cli, image, signerName string, forceYes bool
 	if err = notaryRepo.Publish(); err != nil {
 		return err
 	}
-	fmt.Fprintf(cli.Out(), "Successfully removed %s from %s\n", signerName, image)
+	fmt.Fprintf(cli.Out(), "Successfully removed %s from %s\n", signerName, imageName)
 	return nil
 }
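Both the add and remove paths above now use image.AuthResolver(cli) instead of hand-building the registry-auth closure, and NotaryClient receives the resolved reference directly rather than a dereferenced pointer. A hedged sketch of the shared shape follows; the package name and helper name are invented for illustration, while the individual calls are the ones visible in the diff.

    // Hypothetical standalone file illustrating the call pattern; not part of the commit.
    package trustsketch

    import (
        "context"

        "github.com/docker/cli/cli/command"
        "github.com/docker/cli/cli/command/image"
        "github.com/docker/cli/cli/trust"
    )

    // resolveNotaryRepo mirrors the lookup now done by both addSignerToImage and
    // removeSingleSigner before they touch delegation roles.
    func resolveNotaryRepo(cli command.Cli, imageName string) error {
        ctx := context.Background()

        // image.AuthResolver(cli) supplies the same callback the commands used to
        // build inline from command.ResolveAuthConfig.
        imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(cli), imageName)
        if err != nil {
            return err
        }

        // NotaryClient now takes imgRefAndAuth as-is; the old *imgRefAndAuth
        // dereference is gone.
        notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
        if err != nil {
            return trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
        }
        _ = notaryRepo // per-command work (adding or removing a signer) continues here
        return nil
    }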
vendor.conf (14 changes)

@@ -1,9 +1,5 @@
 github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
 github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
-github.com/bugsnag/bugsnag-go 13fd6b8acda029830ef9904df6b63be0a83369d0
-github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
-github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
-github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
 github.com/coreos/etcd 824277cb3a577a0e8c829ca9ec557b973fe06d20
 github.com/cpuguy83/go-md2man a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
@@ -18,19 +14,15 @@ github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
-github.com/docker/notary 8a1de3cfc3f1408e54d6364fc949214a4883a9f3
+github.com/docker/notary 5d55a30c1bec010a8c6df4c09889acfb4e0a7942
 github.com/docker/swarmkit 872861d2ae46958af7ead1d5fffb092c73afbaf0
 github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
 github.com/gogo/protobuf v0.4
 github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
 github.com/gorilla/context v1.1
 github.com/gorilla/mux v1.1
-github.com/go-sql-driver/mysql a0583e0143b1624142adab07e0e97fe106d99561
 github.com/gotestyourself/gotestyourself v1.2.0
 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-github.com/kr/pretty bc9499caa0f45ee5edb2f0209fbd61fbf3d9018f
-github.com/kr/text 6807e777504f54ad073ecef66747de158294b639
-github.com/magiconair/properties 624009598839a9432bd97bb75552389422357723
 github.com/mattn/go-shellwords v1.0.3
 github.com/Microsoft/go-winio v0.4.4
 github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
@@ -44,13 +36,9 @@ github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
 github.com/pmezard/go-difflib v1.0.0
 github.com/russross/blackfriday 1d6b8e9301e720b08a8938b8c25c018285885438
 github.com/shurcooL/sanitized_anchor_name 10ef21a441db47d8b13ebcc5fd2310f636973c77
-github.com/Shopify/logrus-bugsnag 6dbc35f2c30d1e37549f9673dd07912452ab28a5
 github.com/sirupsen/logrus v1.0.3
-github.com/spf13/cast 4d07383ffe94b5e5a6fa3af9211374a4507a0184
 github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git
-github.com/spf13/jwalterweatherman 3d60171a64319ef63c78bd45bd60e6eab1e75f8b
 github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
-github.com/spf13/viper be5ff3e4840cf692388bde7a057595a474ef379e
 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
 github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
 github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
|
||||||
|
|
|
@ -1,21 +0,0 @@
|
||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2013 TOML authors
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
|
@ -1,218 +0,0 @@
|
||||||
## TOML parser and encoder for Go with reflection
|
|
||||||
|
|
||||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
|
||||||
reflection interface similar to Go's standard library `json` and `xml`
|
|
||||||
packages. This package also supports the `encoding.TextUnmarshaler` and
|
|
||||||
`encoding.TextMarshaler` interfaces so that you can define custom data
|
|
||||||
representations. (There is an example of this below.)
|
|
||||||
|
|
||||||
Spec: https://github.com/toml-lang/toml
|
|
||||||
|
|
||||||
Compatible with TOML version
|
|
||||||
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
|
|
||||||
|
|
||||||
Documentation: https://godoc.org/github.com/BurntSushi/toml
|
|
||||||
|
|
||||||
Installation:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get github.com/BurntSushi/toml
|
|
||||||
```
|
|
||||||
|
|
||||||
Try the toml validator:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
|
||||||
tomlv some-toml-file.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
|
|
||||||
|
|
||||||
### Testing
|
|
||||||
|
|
||||||
This package passes all tests in
|
|
||||||
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
|
|
||||||
and the encoder.
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
|
|
||||||
This package works similarly to how the Go standard library handles `XML`
|
|
||||||
and `JSON`. Namely, data is loaded into Go values via reflection.
|
|
||||||
|
|
||||||
For the simplest example, consider some TOML file as just a list of keys
|
|
||||||
and values:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
Age = 25
|
|
||||||
Cats = [ "Cauchy", "Plato" ]
|
|
||||||
Pi = 3.14
|
|
||||||
Perfection = [ 6, 28, 496, 8128 ]
|
|
||||||
DOB = 1987-07-05T05:45:00Z
|
|
||||||
```
|
|
||||||
|
|
||||||
Which could be defined in Go as:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Config struct {
|
|
||||||
Age int
|
|
||||||
Cats []string
|
|
||||||
Pi float64
|
|
||||||
Perfection []int
|
|
||||||
DOB time.Time // requires `import time`
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
And then decoded with:
|
|
||||||
|
|
||||||
```go
|
|
||||||
var conf Config
|
|
||||||
if _, err := toml.Decode(tomlData, &conf); err != nil {
|
|
||||||
// handle error
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
|
||||||
key value directly:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
some_key_NAME = "wat"
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
type TOML struct {
|
|
||||||
ObscureKey string `toml:"some_key_NAME"`
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Using the `encoding.TextUnmarshaler` interface
|
|
||||||
|
|
||||||
Here's an example that automatically parses duration strings into
|
|
||||||
`time.Duration` values:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[song]]
|
|
||||||
name = "Thunder Road"
|
|
||||||
duration = "4m49s"
|
|
||||||
|
|
||||||
[[song]]
|
|
||||||
name = "Stairway to Heaven"
|
|
||||||
duration = "8m03s"
|
|
||||||
```
|
|
||||||
|
|
||||||
Which can be decoded with:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type song struct {
|
|
||||||
Name string
|
|
||||||
Duration duration
|
|
||||||
}
|
|
||||||
type songs struct {
|
|
||||||
Song []song
|
|
||||||
}
|
|
||||||
var favorites songs
|
|
||||||
if _, err := toml.Decode(blob, &favorites); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range favorites.Song {
|
|
||||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
And you'll also need a `duration` type that satisfies the
|
|
||||||
`encoding.TextUnmarshaler` interface:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type duration struct {
|
|
||||||
time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *duration) UnmarshalText(text []byte) error {
|
|
||||||
var err error
|
|
||||||
d.Duration, err = time.ParseDuration(string(text))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### More complex usage
|
|
||||||
|
|
||||||
Here's an example of how to load the example from the official spec page:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# This is a TOML document. Boom.
|
|
||||||
|
|
||||||
title = "TOML Example"
|
|
||||||
|
|
||||||
[owner]
|
|
||||||
name = "Tom Preston-Werner"
|
|
||||||
organization = "GitHub"
|
|
||||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
|
||||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
|
||||||
|
|
||||||
[database]
|
|
||||||
server = "192.168.1.1"
|
|
||||||
ports = [ 8001, 8001, 8002 ]
|
|
||||||
connection_max = 5000
|
|
||||||
enabled = true
|
|
||||||
|
|
||||||
[servers]
|
|
||||||
|
|
||||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
|
||||||
[servers.alpha]
|
|
||||||
ip = "10.0.0.1"
|
|
||||||
dc = "eqdc10"
|
|
||||||
|
|
||||||
[servers.beta]
|
|
||||||
ip = "10.0.0.2"
|
|
||||||
dc = "eqdc10"
|
|
||||||
|
|
||||||
[clients]
|
|
||||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
|
||||||
|
|
||||||
# Line breaks are OK when inside arrays
|
|
||||||
hosts = [
|
|
||||||
"alpha",
|
|
||||||
"omega"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
And the corresponding Go types are:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type tomlConfig struct {
|
|
||||||
Title string
|
|
||||||
Owner ownerInfo
|
|
||||||
DB database `toml:"database"`
|
|
||||||
Servers map[string]server
|
|
||||||
Clients clients
|
|
||||||
}
|
|
||||||
|
|
||||||
type ownerInfo struct {
|
|
||||||
Name string
|
|
||||||
Org string `toml:"organization"`
|
|
||||||
Bio string
|
|
||||||
DOB time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type database struct {
|
|
||||||
Server string
|
|
||||||
Ports []int
|
|
||||||
ConnMax int `toml:"connection_max"`
|
|
||||||
Enabled bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type server struct {
|
|
||||||
IP string
|
|
||||||
DC string
|
|
||||||
}
|
|
||||||
|
|
||||||
type clients struct {
|
|
||||||
Data [][]interface{}
|
|
||||||
Hosts []string
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that a case insensitive match will be tried if an exact match can't be
|
|
||||||
found.
|
|
||||||
|
|
||||||
A working example of the above can be found in `_examples/example.{go,toml}`.
|
|
|
@ -1,509 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func e(format string, args ...interface{}) error {
|
|
||||||
return fmt.Errorf("toml: "+format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
|
||||||
// TOML description of themselves.
|
|
||||||
type Unmarshaler interface {
|
|
||||||
UnmarshalTOML(interface{}) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
|
||||||
func Unmarshal(p []byte, v interface{}) error {
|
|
||||||
_, err := Decode(string(p), v)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
|
||||||
// When using the various `Decode*` functions, the type `Primitive` may
|
|
||||||
// be given to any value, and its decoding will be delayed.
|
|
||||||
//
|
|
||||||
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
|
|
||||||
//
|
|
||||||
// The underlying representation of a `Primitive` value is subject to change.
|
|
||||||
// Do not rely on it.
|
|
||||||
//
|
|
||||||
// N.B. Primitive values are still parsed, so using them will only avoid
|
|
||||||
// the overhead of reflection. They can be useful when you don't know the
|
|
||||||
// exact type of TOML data until run time.
|
|
||||||
type Primitive struct {
|
|
||||||
undecoded interface{}
|
|
||||||
context Key
|
|
||||||
}
|
|
||||||
|
|
||||||
// DEPRECATED!
|
|
||||||
//
|
|
||||||
// Use MetaData.PrimitiveDecode instead.
|
|
||||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
|
||||||
md := MetaData{decoded: make(map[string]bool)}
|
|
||||||
return md.unify(primValue.undecoded, rvalue(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
|
||||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
|
||||||
// can *only* be obtained from values filled by the decoder functions,
|
|
||||||
// including this method. (i.e., `v` may contain more `Primitive`
|
|
||||||
// values.)
|
|
||||||
//
|
|
||||||
// Meta data for primitive values is included in the meta data returned by
|
|
||||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
|
||||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
|
||||||
// behind a Primitive will be considered undecoded. Executing this method will
|
|
||||||
// update the undecoded keys in the meta data. (See the example.)
|
|
||||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
|
||||||
md.context = primValue.context
|
|
||||||
defer func() { md.context = nil }()
|
|
||||||
return md.unify(primValue.undecoded, rvalue(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode will decode the contents of `data` in TOML format into a pointer
|
|
||||||
// `v`.
|
|
||||||
//
|
|
||||||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
|
|
||||||
// used interchangeably.)
|
|
||||||
//
|
|
||||||
// TOML arrays of tables correspond to either a slice of structs or a slice
|
|
||||||
// of maps.
|
|
||||||
//
|
|
||||||
// TOML datetimes correspond to Go `time.Time` values.
|
|
||||||
//
|
|
||||||
// All other TOML types (float, string, int, bool and array) correspond
|
|
||||||
// to the obvious Go types.
|
|
||||||
//
|
|
||||||
// An exception to the above rules is if a type implements the
|
|
||||||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
|
|
||||||
// (floats, strings, integers, booleans and datetimes) will be converted to
|
|
||||||
// a byte string and given to the value's UnmarshalText method. See the
|
|
||||||
// Unmarshaler example for a demonstration with time duration strings.
|
|
||||||
//
|
|
||||||
// Key mapping
|
|
||||||
//
|
|
||||||
// TOML keys can map to either keys in a Go map or field names in a Go
|
|
||||||
// struct. The special `toml` struct tag may be used to map TOML keys to
|
|
||||||
// struct fields that don't match the key name exactly. (See the example.)
|
|
||||||
// A case insensitive match to struct names will be tried if an exact match
|
|
||||||
// can't be found.
|
|
||||||
//
|
|
||||||
// The mapping between TOML values and Go values is loose. That is, there
|
|
||||||
// may exist TOML values that cannot be placed into your representation, and
|
|
||||||
// there may be parts of your representation that do not correspond to
|
|
||||||
// TOML values. This loose mapping can be made stricter by using the IsDefined
|
|
||||||
// and/or Undecoded methods on the MetaData returned.
|
|
||||||
//
|
|
||||||
// This decoder will not handle cyclic types. If a cyclic type is passed,
|
|
||||||
// `Decode` will not terminate.
|
|
||||||
func Decode(data string, v interface{}) (MetaData, error) {
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if rv.Kind() != reflect.Ptr {
|
|
||||||
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
|
|
||||||
}
|
|
||||||
if rv.IsNil() {
|
|
||||||
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
|
|
||||||
}
|
|
||||||
p, err := parse(data)
|
|
||||||
if err != nil {
|
|
||||||
return MetaData{}, err
|
|
||||||
}
|
|
||||||
md := MetaData{
|
|
||||||
p.mapping, p.types, p.ordered,
|
|
||||||
make(map[string]bool, len(p.ordered)), nil,
|
|
||||||
}
|
|
||||||
return md, md.unify(p.mapping, indirect(rv))
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeFile is just like Decode, except it will automatically read the
|
|
||||||
// contents of the file at `fpath` and decode it for you.
|
|
||||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
|
||||||
bs, err := ioutil.ReadFile(fpath)
|
|
||||||
if err != nil {
|
|
||||||
return MetaData{}, err
|
|
||||||
}
|
|
||||||
return Decode(string(bs), v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeReader is just like Decode, except it will consume all bytes
|
|
||||||
// from the reader and decode it for you.
|
|
||||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
|
||||||
bs, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return MetaData{}, err
|
|
||||||
}
|
|
||||||
return Decode(string(bs), v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unify performs a sort of type unification based on the structure of `rv`,
|
|
||||||
// which is the client representation.
|
|
||||||
//
|
|
||||||
// Any type mismatch produces an error. Finding a type that we don't know
|
|
||||||
// how to handle produces an unsupported type error.
|
|
||||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
|
||||||
|
|
||||||
// Special case. Look for a `Primitive` value.
|
|
||||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
|
||||||
// Save the undecoded data and the key context into the primitive
|
|
||||||
// value.
|
|
||||||
context := make(Key, len(md.context))
|
|
||||||
copy(context, md.context)
|
|
||||||
rv.Set(reflect.ValueOf(Primitive{
|
|
||||||
undecoded: data,
|
|
||||||
context: context,
|
|
||||||
}))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special case. Unmarshaler Interface support.
|
|
||||||
if rv.CanAddr() {
|
|
||||||
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
|
|
||||||
return v.UnmarshalTOML(data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special case. Handle time.Time values specifically.
|
|
||||||
// TODO: Remove this code when we decide to drop support for Go 1.1.
|
|
||||||
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
|
|
||||||
// interfaces.
|
|
||||||
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
|
|
||||||
return md.unifyDatetime(data, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
|
||||||
if v, ok := rv.Interface().(TextUnmarshaler); ok {
|
|
||||||
return md.unifyText(data, v)
|
|
||||||
}
|
|
||||||
// BUG(burntsushi)
|
|
||||||
// The behavior here is incorrect whenever a Go type satisfies the
|
|
||||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML
|
|
||||||
// hash or array. In particular, the unmarshaler should only be applied
|
|
||||||
// to primitive TOML values. But at this point, it will be applied to
|
|
||||||
// all kinds of values and produce an incorrect error whenever those values
|
|
||||||
// are hashes or arrays (including arrays of tables).
|
|
||||||
|
|
||||||
k := rv.Kind()
|
|
||||||
|
|
||||||
// laziness
|
|
||||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
|
||||||
return md.unifyInt(data, rv)
|
|
||||||
}
|
|
||||||
switch k {
|
|
||||||
case reflect.Ptr:
|
|
||||||
elem := reflect.New(rv.Type().Elem())
|
|
||||||
err := md.unify(data, reflect.Indirect(elem))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rv.Set(elem)
|
|
||||||
return nil
|
|
||||||
case reflect.Struct:
|
|
||||||
return md.unifyStruct(data, rv)
|
|
||||||
case reflect.Map:
|
|
||||||
return md.unifyMap(data, rv)
|
|
||||||
case reflect.Array:
|
|
||||||
return md.unifyArray(data, rv)
|
|
||||||
case reflect.Slice:
|
|
||||||
return md.unifySlice(data, rv)
|
|
||||||
case reflect.String:
|
|
||||||
return md.unifyString(data, rv)
|
|
||||||
case reflect.Bool:
|
|
||||||
return md.unifyBool(data, rv)
|
|
||||||
case reflect.Interface:
|
|
||||||
// we only support empty interfaces.
|
|
||||||
if rv.NumMethod() > 0 {
|
|
||||||
return e("unsupported type %s", rv.Type())
|
|
||||||
}
|
|
||||||
return md.unifyAnything(data, rv)
|
|
||||||
case reflect.Float32:
|
|
||||||
fallthrough
|
|
||||||
case reflect.Float64:
|
|
||||||
return md.unifyFloat64(data, rv)
|
|
||||||
}
|
|
||||||
return e("unsupported type %s", rv.Kind())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
|
||||||
tmap, ok := mapping.(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
if mapping == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return e("type mismatch for %s: expected table but found %T",
|
|
||||||
rv.Type().String(), mapping)
|
|
||||||
}
|
|
||||||
|
|
||||||
for key, datum := range tmap {
|
|
||||||
var f *field
|
|
||||||
fields := cachedTypeFields(rv.Type())
|
|
||||||
for i := range fields {
|
|
||||||
ff := &fields[i]
|
|
||||||
if ff.name == key {
|
|
||||||
f = ff
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if f == nil && strings.EqualFold(ff.name, key) {
|
|
||||||
f = ff
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if f != nil {
|
|
||||||
subv := rv
|
|
||||||
for _, i := range f.index {
|
|
||||||
subv = indirect(subv.Field(i))
|
|
||||||
}
|
|
||||||
if isUnifiable(subv) {
|
|
||||||
md.decoded[md.context.add(key).String()] = true
|
|
||||||
md.context = append(md.context, key)
|
|
||||||
if err := md.unify(datum, subv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
md.context = md.context[0 : len(md.context)-1]
|
|
||||||
} else if f.name != "" {
|
|
||||||
// Bad user! No soup for you!
|
|
||||||
return e("cannot write unexported field %s.%s",
|
|
||||||
rv.Type().String(), f.name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
|
||||||
tmap, ok := mapping.(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
if tmap == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("map", mapping)
|
|
||||||
}
|
|
||||||
if rv.IsNil() {
|
|
||||||
rv.Set(reflect.MakeMap(rv.Type()))
|
|
||||||
}
|
|
||||||
for k, v := range tmap {
|
|
||||||
md.decoded[md.context.add(k).String()] = true
|
|
||||||
md.context = append(md.context, k)
|
|
||||||
|
|
||||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
|
||||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
|
||||||
if err := md.unify(v, rvval); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
md.context = md.context[0 : len(md.context)-1]
|
|
||||||
|
|
||||||
rvkey.SetString(k)
|
|
||||||
rv.SetMapIndex(rvkey, rvval)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
|
||||||
datav := reflect.ValueOf(data)
|
|
||||||
if datav.Kind() != reflect.Slice {
|
|
||||||
if !datav.IsValid() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("slice", data)
|
|
||||||
}
|
|
||||||
sliceLen := datav.Len()
|
|
||||||
if sliceLen != rv.Len() {
|
|
||||||
return e("expected array length %d; got TOML array of length %d",
|
|
||||||
rv.Len(), sliceLen)
|
|
||||||
}
|
|
||||||
return md.unifySliceArray(datav, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
|
||||||
datav := reflect.ValueOf(data)
|
|
||||||
if datav.Kind() != reflect.Slice {
|
|
||||||
if !datav.IsValid() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("slice", data)
|
|
||||||
}
|
|
||||||
n := datav.Len()
|
|
||||||
if rv.IsNil() || rv.Cap() < n {
|
|
||||||
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
|
|
||||||
}
|
|
||||||
rv.SetLen(n)
|
|
||||||
return md.unifySliceArray(datav, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
|
||||||
sliceLen := data.Len()
|
|
||||||
for i := 0; i < sliceLen; i++ {
|
|
||||||
v := data.Index(i).Interface()
|
|
||||||
sliceval := indirect(rv.Index(i))
|
|
||||||
if err := md.unify(v, sliceval); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
|
||||||
if _, ok := data.(time.Time); ok {
|
|
||||||
rv.Set(reflect.ValueOf(data))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("time.Time", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
|
||||||
if s, ok := data.(string); ok {
|
|
||||||
rv.SetString(s)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("string", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
|
||||||
if num, ok := data.(float64); ok {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Float32:
|
|
||||||
fallthrough
|
|
||||||
case reflect.Float64:
|
|
||||||
rv.SetFloat(num)
|
|
||||||
default:
|
|
||||||
panic("bug")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("float", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
|
||||||
if num, ok := data.(int64); ok {
|
|
||||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Int, reflect.Int64:
|
|
||||||
// No bounds checking necessary.
|
|
||||||
case reflect.Int8:
|
|
||||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
|
||||||
return e("value %d is out of range for int8", num)
|
|
||||||
}
|
|
||||||
case reflect.Int16:
|
|
||||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
|
||||||
return e("value %d is out of range for int16", num)
|
|
||||||
}
|
|
||||||
case reflect.Int32:
|
|
||||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
|
||||||
return e("value %d is out of range for int32", num)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rv.SetInt(num)
|
|
||||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
|
||||||
unum := uint64(num)
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Uint, reflect.Uint64:
|
|
||||||
// No bounds checking necessary.
|
|
||||||
case reflect.Uint8:
|
|
||||||
if num < 0 || unum > math.MaxUint8 {
|
|
||||||
return e("value %d is out of range for uint8", num)
|
|
||||||
}
|
|
||||||
case reflect.Uint16:
|
|
||||||
if num < 0 || unum > math.MaxUint16 {
|
|
||||||
return e("value %d is out of range for uint16", num)
|
|
||||||
}
|
|
||||||
case reflect.Uint32:
|
|
||||||
if num < 0 || unum > math.MaxUint32 {
|
|
||||||
return e("value %d is out of range for uint32", num)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rv.SetUint(unum)
|
|
||||||
} else {
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("integer", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
|
||||||
if b, ok := data.(bool); ok {
|
|
||||||
rv.SetBool(b)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("boolean", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
|
||||||
rv.Set(reflect.ValueOf(data))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
|
||||||
var s string
|
|
||||||
switch sdata := data.(type) {
|
|
||||||
case TextMarshaler:
|
|
||||||
text, err := sdata.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s = string(text)
|
|
||||||
case fmt.Stringer:
|
|
||||||
s = sdata.String()
|
|
||||||
case string:
|
|
||||||
s = sdata
|
|
||||||
case bool:
|
|
||||||
s = fmt.Sprintf("%v", sdata)
|
|
||||||
case int64:
|
|
||||||
s = fmt.Sprintf("%d", sdata)
|
|
||||||
case float64:
|
|
||||||
s = fmt.Sprintf("%f", sdata)
|
|
||||||
default:
|
|
||||||
return badtype("primitive (string-like)", data)
|
|
||||||
}
|
|
||||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
|
||||||
func rvalue(v interface{}) reflect.Value {
|
|
||||||
return indirect(reflect.ValueOf(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// indirect returns the value pointed to by a pointer.
|
|
||||||
// Pointers are followed until the value is not a pointer.
|
|
||||||
// New values are allocated for each nil pointer.
|
|
||||||
//
|
|
||||||
// An exception to this rule is if the value satisfies an interface of
|
|
||||||
// interest to us (like encoding.TextUnmarshaler).
|
|
||||||
func indirect(v reflect.Value) reflect.Value {
|
|
||||||
if v.Kind() != reflect.Ptr {
|
|
||||||
if v.CanSet() {
|
|
||||||
pv := v.Addr()
|
|
||||||
if _, ok := pv.Interface().(TextUnmarshaler); ok {
|
|
||||||
return pv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
if v.IsNil() {
|
|
||||||
v.Set(reflect.New(v.Type().Elem()))
|
|
||||||
}
|
|
||||||
return indirect(reflect.Indirect(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
func isUnifiable(rv reflect.Value) bool {
|
|
||||||
if rv.CanSet() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func badtype(expected string, data interface{}) error {
|
|
||||||
return e("cannot load TOML value of type %T into a Go %s", data, expected)
|
|
||||||
}
|
|
|
@ -1,121 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
// MetaData allows access to meta information about TOML data that may not
|
|
||||||
// be inferrable via reflection. In particular, whether a key has been defined
|
|
||||||
// and the TOML type of a key.
|
|
||||||
type MetaData struct {
|
|
||||||
mapping map[string]interface{}
|
|
||||||
types map[string]tomlType
|
|
||||||
keys []Key
|
|
||||||
decoded map[string]bool
|
|
||||||
context Key // Used only during decoding.
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsDefined returns true if the key given exists in the TOML data. The key
|
|
||||||
// should be specified hierarchially. e.g.,
|
|
||||||
//
|
|
||||||
// // access the TOML key 'a.b.c'
|
|
||||||
// IsDefined("a", "b", "c")
|
|
||||||
//
|
|
||||||
// IsDefined will return false if an empty key given. Keys are case sensitive.
|
|
||||||
func (md *MetaData) IsDefined(key ...string) bool {
|
|
||||||
if len(key) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
var hash map[string]interface{}
|
|
||||||
var ok bool
|
|
||||||
var hashOrVal interface{} = md.mapping
|
|
||||||
for _, k := range key {
|
|
||||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if hashOrVal, ok = hash[k]; !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns a string representation of the type of the key specified.
|
|
||||||
//
|
|
||||||
// Type will return the empty string if given an empty key or a key that
|
|
||||||
// does not exist. Keys are case sensitive.
|
|
||||||
func (md *MetaData) Type(key ...string) string {
|
|
||||||
fullkey := strings.Join(key, ".")
|
|
||||||
if typ, ok := md.types[fullkey]; ok {
|
|
||||||
return typ.typeString()
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
|
|
||||||
// to get values of this type.
|
|
||||||
type Key []string
|
|
||||||
|
|
||||||
func (k Key) String() string {
|
|
||||||
return strings.Join(k, ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (k Key) maybeQuotedAll() string {
|
|
||||||
var ss []string
|
|
||||||
for i := range k {
|
|
||||||
ss = append(ss, k.maybeQuoted(i))
|
|
||||||
}
|
|
||||||
return strings.Join(ss, ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (k Key) maybeQuoted(i int) string {
|
|
||||||
quote := false
|
|
||||||
for _, c := range k[i] {
|
|
||||||
if !isBareKeyChar(c) {
|
|
||||||
quote = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if quote {
|
|
||||||
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
|
|
||||||
}
|
|
||||||
return k[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (k Key) add(piece string) Key {
|
|
||||||
newKey := make(Key, len(k)+1)
|
|
||||||
copy(newKey, k)
|
|
||||||
newKey[len(k)] = piece
|
|
||||||
return newKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns a slice of every key in the TOML data, including key groups.
|
|
||||||
// Each key is itself a slice, where the first element is the top of the
|
|
||||||
// hierarchy and the last is the most specific.
|
|
||||||
//
|
|
||||||
// The list will have the same order as the keys appeared in the TOML data.
|
|
||||||
//
|
|
||||||
// All keys returned are non-empty.
|
|
||||||
func (md *MetaData) Keys() []Key {
|
|
||||||
return md.keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// Undecoded returns all keys that have not been decoded in the order in which
|
|
||||||
// they appear in the original TOML document.
|
|
||||||
//
|
|
||||||
// This includes keys that haven't been decoded because of a Primitive value.
|
|
||||||
// Once the Primitive value is decoded, the keys will be considered decoded.
|
|
||||||
//
|
|
||||||
// Also note that decoding into an empty interface will result in no decoding,
|
|
||||||
// and so no keys will be considered decoded.
|
|
||||||
//
|
|
||||||
// In this sense, the Undecoded keys correspond to keys in the TOML document
|
|
||||||
// that do not have a concrete type in your representation.
|
|
||||||
func (md *MetaData) Undecoded() []Key {
|
|
||||||
undecoded := make([]Key, 0, len(md.keys))
|
|
||||||
for _, key := range md.keys {
|
|
||||||
if !md.decoded[key.String()] {
|
|
||||||
undecoded = append(undecoded, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return undecoded
|
|
||||||
}
|
|
|
@ -1,27 +0,0 @@
|
||||||
/*
|
|
||||||
Package toml provides facilities for decoding and encoding TOML configuration
|
|
||||||
files via reflection. There is also support for delaying decoding with
|
|
||||||
the Primitive type, and querying the set of keys in a TOML document with the
|
|
||||||
MetaData type.
|
|
||||||
|
|
||||||
The specification implemented: https://github.com/toml-lang/toml
|
|
||||||
|
|
||||||
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
|
|
||||||
whether a file is a valid TOML document. It can also be used to print the
|
|
||||||
type of each key in a TOML document.
|
|
||||||
|
|
||||||
Testing
|
|
||||||
|
|
||||||
There are two important types of tests used for this package. The first is
|
|
||||||
contained inside '*_test.go' files and uses the standard Go unit testing
|
|
||||||
framework. These tests are primarily devoted to holistically testing the
|
|
||||||
decoder and encoder.
|
|
||||||
|
|
||||||
The second type of testing is used to verify the implementation's adherence
|
|
||||||
to the TOML specification. These tests have been factored into their own
|
|
||||||
project: https://github.com/BurntSushi/toml-test
|
|
||||||
|
|
||||||
The reason the tests are in a separate project is so that they can be used by
|
|
||||||
any implementation of TOML. Namely, it is language agnostic.
|
|
||||||
*/
|
|
||||||
package toml
|
|
|
@ -1,568 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type tomlEncodeError struct{ error }
|
|
||||||
|
|
||||||
var (
|
|
||||||
errArrayMixedElementTypes = errors.New(
|
|
||||||
"toml: cannot encode array with mixed element types")
|
|
||||||
errArrayNilElement = errors.New(
|
|
||||||
"toml: cannot encode array with nil element")
|
|
||||||
errNonString = errors.New(
|
|
||||||
"toml: cannot encode a map with non-string key type")
|
|
||||||
errAnonNonStruct = errors.New(
|
|
||||||
"toml: cannot encode an anonymous field that is not a struct")
|
|
||||||
errArrayNoTable = errors.New(
|
|
||||||
"toml: TOML array element cannot contain a table")
|
|
||||||
errNoKey = errors.New(
|
|
||||||
"toml: top-level values must be Go maps or structs")
|
|
||||||
errAnything = errors.New("") // used in testing
|
|
||||||
)
|
|
||||||
|
|
||||||
var quotedReplacer = strings.NewReplacer(
|
|
||||||
"\t", "\\t",
|
|
||||||
"\n", "\\n",
|
|
||||||
"\r", "\\r",
|
|
||||||
"\"", "\\\"",
|
|
||||||
"\\", "\\\\",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Encoder controls the encoding of Go values to a TOML document to some
|
|
||||||
// io.Writer.
|
|
||||||
//
|
|
||||||
// The indentation level can be controlled with the Indent field.
|
|
||||||
type Encoder struct {
|
|
||||||
// A single indentation level. By default it is two spaces.
|
|
||||||
Indent string
|
|
||||||
|
|
||||||
// hasWritten is whether we have written any output to w yet.
|
|
||||||
hasWritten bool
|
|
||||||
w *bufio.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
|
||||||
// given. By default, a single indentation level is 2 spaces.
|
|
||||||
func NewEncoder(w io.Writer) *Encoder {
|
|
||||||
return &Encoder{
|
|
||||||
w: bufio.NewWriter(w),
|
|
||||||
Indent: " ",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode writes a TOML representation of the Go value to the underlying
|
|
||||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
|
||||||
// then an error is returned.
|
|
||||||
//
|
|
||||||
// The mapping between Go values and TOML values should be precisely the same
|
|
||||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
|
||||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
|
||||||
// arbitrary binary data then you will need to use something like base64 since
|
|
||||||
// TOML does not have any binary types.)
|
|
||||||
//
|
|
||||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
|
||||||
// sub-hashes are encoded first.
|
|
||||||
//
|
|
||||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
|
||||||
// deterministic output. More control over this behavior may be provided if
|
|
||||||
// there is demand for it.
|
|
||||||
//
|
|
||||||
// Encoding Go values without a corresponding TOML representation---like map
|
|
||||||
// types with non-string keys---will cause an error to be returned. Similarly
|
|
||||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
|
||||||
// non-struct types and nested slices containing maps or structs.
|
|
||||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
|
||||||
// and so is []map[string][]string.)
|
|
||||||
func (enc *Encoder) Encode(v interface{}) error {
|
|
||||||
rv := eindirect(reflect.ValueOf(v))
|
|
||||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return enc.w.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
if terr, ok := r.(tomlEncodeError); ok {
|
|
||||||
err = terr.error
|
|
||||||
return
|
|
||||||
}
|
|
||||||
panic(r)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
enc.encode(key, rv)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
|
||||||
// Special case. Time needs to be in ISO8601 format.
|
|
||||||
// Special case. If we can marshal the type to text, then we used that.
|
|
||||||
// Basically, this prevents the encoder for handling these types as
|
|
||||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
|
||||||
switch rv.Interface().(type) {
|
|
||||||
case time.Time, TextMarshaler:
|
|
||||||
enc.keyEqElement(key, rv)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
k := rv.Kind()
|
|
||||||
switch k {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
|
||||||
reflect.Int64,
|
|
||||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
|
||||||
reflect.Uint64,
|
|
||||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
|
||||||
enc.keyEqElement(key, rv)
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
|
||||||
enc.eArrayOfTables(key, rv)
|
|
||||||
} else {
|
|
||||||
enc.keyEqElement(key, rv)
|
|
||||||
}
|
|
||||||
case reflect.Interface:
|
|
||||||
if rv.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
enc.encode(key, rv.Elem())
|
|
||||||
case reflect.Map:
|
|
||||||
if rv.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
enc.eTable(key, rv)
|
|
||||||
case reflect.Ptr:
|
|
||||||
if rv.IsNil() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
enc.encode(key, rv.Elem())
|
|
||||||
case reflect.Struct:
|
|
||||||
enc.eTable(key, rv)
|
|
||||||
default:
|
|
||||||
panic(e("unsupported type for key '%s': %s", key, k))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// eElement encodes any value that can be an array element (primitives and
|
|
||||||
// arrays).
|
|
||||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
|
||||||
switch v := rv.Interface().(type) {
|
|
||||||
case time.Time:
|
|
||||||
// Special case time.Time as a primitive. Has to come before
|
|
||||||
// TextMarshaler below because time.Time implements
|
|
||||||
// encoding.TextMarshaler, but we need to always use UTC.
|
|
||||||
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
|
|
||||||
return
|
|
||||||
case TextMarshaler:
|
|
||||||
// Special case. Use text marshaler if it's available for this value.
|
|
||||||
if s, err := v.MarshalText(); err != nil {
|
|
||||||
encPanic(err)
|
|
||||||
} else {
|
|
||||||
enc.writeQuoted(string(s))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
|
||||||
reflect.Int64:
|
|
||||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
|
||||||
reflect.Uint32, reflect.Uint64:
|
|
||||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
|
||||||
case reflect.Float32:
|
|
||||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
|
||||||
case reflect.Float64:
|
|
||||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
enc.eArrayOrSliceElement(rv)
|
|
||||||
case reflect.Interface:
|
|
||||||
enc.eElement(rv.Elem())
|
|
||||||
case reflect.String:
|
|
||||||
enc.writeQuoted(rv.String())
|
|
||||||
default:
|
|
||||||
panic(e("unexpected primitive type: %s", rv.Kind()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// By the TOML spec, all floats must have a decimal with at least one
|
|
||||||
// number on either side.
|
|
||||||
func floatAddDecimal(fstr string) string {
|
|
||||||
if !strings.Contains(fstr, ".") {
|
|
||||||
return fstr + ".0"
|
|
||||||
}
|
|
||||||
return fstr
|
|
||||||
}
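// A quick worked example of the rule above (results shown as comments):
//
//	floatAddDecimal("3")    // "3.0"
//	floatAddDecimal("3.14") // "3.14" (already has a fractional part)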
|
|
||||||
|
|
||||||
func (enc *Encoder) writeQuoted(s string) {
|
|
||||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
|
||||||
length := rv.Len()
|
|
||||||
enc.wf("[")
|
|
||||||
for i := 0; i < length; i++ {
|
|
||||||
elem := rv.Index(i)
|
|
||||||
enc.eElement(elem)
|
|
||||||
if i != length-1 {
|
|
||||||
enc.wf(", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
enc.wf("]")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
|
||||||
if len(key) == 0 {
|
|
||||||
encPanic(errNoKey)
|
|
||||||
}
|
|
||||||
for i := 0; i < rv.Len(); i++ {
|
|
||||||
trv := rv.Index(i)
|
|
||||||
if isNil(trv) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
panicIfInvalidKey(key)
|
|
||||||
enc.newline()
|
|
||||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
|
|
||||||
enc.newline()
|
|
||||||
enc.eMapOrStruct(key, trv)
|
|
||||||
}
|
|
||||||
}
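// Sketch of the output shape this produces, assuming a hypothetical value
// map[string][]struct{ Name string `toml:"name"` }{"fruit": {{"apple"}, {"pear"}}}
// and the encoder's default two-space indent:
//
//	[[fruit]]
//	  name = "apple"
//
//	[[fruit]]
//	  name = "pear"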
|
|
||||||
|
|
||||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
|
||||||
panicIfInvalidKey(key)
|
|
||||||
if len(key) == 1 {
|
|
||||||
// Output an extra newline between top-level tables.
|
|
||||||
// (The newline isn't written if nothing else has been written though.)
|
|
||||||
enc.newline()
|
|
||||||
}
|
|
||||||
if len(key) > 0 {
|
|
||||||
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
|
|
||||||
enc.newline()
|
|
||||||
}
|
|
||||||
enc.eMapOrStruct(key, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
|
||||||
switch rv := eindirect(rv); rv.Kind() {
|
|
||||||
case reflect.Map:
|
|
||||||
enc.eMap(key, rv)
|
|
||||||
case reflect.Struct:
|
|
||||||
enc.eStruct(key, rv)
|
|
||||||
default:
|
|
||||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
|
||||||
rt := rv.Type()
|
|
||||||
if rt.Key().Kind() != reflect.String {
|
|
||||||
encPanic(errNonString)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort keys so that we have deterministic output. And write keys directly
|
|
||||||
// underneath this key first, before writing sub-structs or sub-maps.
|
|
||||||
var mapKeysDirect, mapKeysSub []string
|
|
||||||
for _, mapKey := range rv.MapKeys() {
|
|
||||||
k := mapKey.String()
|
|
||||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
|
||||||
mapKeysSub = append(mapKeysSub, k)
|
|
||||||
} else {
|
|
||||||
mapKeysDirect = append(mapKeysDirect, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var writeMapKeys = func(mapKeys []string) {
|
|
||||||
sort.Strings(mapKeys)
|
|
||||||
for _, mapKey := range mapKeys {
|
|
||||||
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
|
|
||||||
if isNil(mrv) {
|
|
||||||
// Don't write anything for nil fields.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
enc.encode(key.add(mapKey), mrv)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
writeMapKeys(mapKeysDirect)
|
|
||||||
writeMapKeys(mapKeysSub)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
|
|
||||||
// Write keys for fields directly under this key first, because if we write
|
|
||||||
// a field that creates a new table, then all keys under it will be in that
|
|
||||||
// table (not the one we're writing here).
|
|
||||||
rt := rv.Type()
|
|
||||||
var fieldsDirect, fieldsSub [][]int
|
|
||||||
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
|
||||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
|
||||||
for i := 0; i < rt.NumField(); i++ {
|
|
||||||
f := rt.Field(i)
|
|
||||||
// skip unexported fields
|
|
||||||
if f.PkgPath != "" && !f.Anonymous {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
frv := rv.Field(i)
|
|
||||||
if f.Anonymous {
|
|
||||||
t := f.Type
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
// Treat anonymous struct fields with
|
|
||||||
// tag names as though they are not
|
|
||||||
// anonymous, like encoding/json does.
|
|
||||||
if getOptions(f.Tag).name == "" {
|
|
||||||
addFields(t, frv, f.Index)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case reflect.Ptr:
|
|
||||||
if t.Elem().Kind() == reflect.Struct &&
|
|
||||||
getOptions(f.Tag).name == "" {
|
|
||||||
if !frv.IsNil() {
|
|
||||||
addFields(t.Elem(), frv.Elem(), f.Index)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Fall through to the normal field encoding logic below
|
|
||||||
// for non-struct anonymous fields.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if typeIsHash(tomlTypeOfGo(frv)) {
|
|
||||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
|
||||||
} else {
|
|
||||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
addFields(rt, rv, nil)
|
|
||||||
|
|
||||||
var writeFields = func(fields [][]int) {
|
|
||||||
for _, fieldIndex := range fields {
|
|
||||||
sft := rt.FieldByIndex(fieldIndex)
|
|
||||||
sf := rv.FieldByIndex(fieldIndex)
|
|
||||||
if isNil(sf) {
|
|
||||||
// Don't write anything for nil fields.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := getOptions(sft.Tag)
|
|
||||||
if opts.skip {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
keyName := sft.Name
|
|
||||||
if opts.name != "" {
|
|
||||||
keyName = opts.name
|
|
||||||
}
|
|
||||||
if opts.omitempty && isEmpty(sf) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if opts.omitzero && isZero(sf) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
enc.encode(key.add(keyName), sf)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
writeFields(fieldsDirect)
|
|
||||||
writeFields(fieldsSub)
|
|
||||||
}
|
|
||||||
|
|
||||||
// tomlTypeOfGo returns the TOML type of a Go value. The returned type may be
// nil, which means no concrete TOML type could be found; callers use a nil
// type to detect values (such as nil elements) that cannot appear in a TOML
// array.
|
|
||||||
func tomlTypeOfGo(rv reflect.Value) tomlType {
|
|
||||||
if isNil(rv) || !rv.IsValid() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return tomlBool
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
|
||||||
reflect.Int64,
|
|
||||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
|
||||||
reflect.Uint64:
|
|
||||||
return tomlInteger
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return tomlFloat
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
|
||||||
return tomlArrayHash
|
|
||||||
}
|
|
||||||
return tomlArray
|
|
||||||
case reflect.Ptr, reflect.Interface:
|
|
||||||
return tomlTypeOfGo(rv.Elem())
|
|
||||||
case reflect.String:
|
|
||||||
return tomlString
|
|
||||||
case reflect.Map:
|
|
||||||
return tomlHash
|
|
||||||
case reflect.Struct:
|
|
||||||
switch rv.Interface().(type) {
|
|
||||||
case time.Time:
|
|
||||||
return tomlDatetime
|
|
||||||
case TextMarshaler:
|
|
||||||
return tomlString
|
|
||||||
default:
|
|
||||||
return tomlHash
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
panic("unexpected reflect.Kind: " + rv.Kind().String())
|
|
||||||
}
|
|
||||||
}
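// A few concrete mappings implied by the switch above (illustrative only):
//
//	tomlTypeOfGo(reflect.ValueOf(int64(1)))         // tomlInteger
//	tomlTypeOfGo(reflect.ValueOf("x"))              // tomlString
//	tomlTypeOfGo(reflect.ValueOf([]int{1, 2}))      // tomlArray
//	tomlTypeOfGo(reflect.ValueOf(map[string]int{})) // tomlHash
//	tomlTypeOfGo(reflect.ValueOf(time.Now()))       // tomlDatetime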
|
|
||||||
|
|
||||||
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
|
|
||||||
func tomlArrayType(rv reflect.Value) tomlType {
|
|
||||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
firstType := tomlTypeOfGo(rv.Index(0))
|
|
||||||
if firstType == nil {
|
|
||||||
encPanic(errArrayNilElement)
|
|
||||||
}
|
|
||||||
|
|
||||||
rvlen := rv.Len()
|
|
||||||
for i := 1; i < rvlen; i++ {
|
|
||||||
elem := rv.Index(i)
|
|
||||||
switch elemType := tomlTypeOfGo(elem); {
|
|
||||||
case elemType == nil:
|
|
||||||
encPanic(errArrayNilElement)
|
|
||||||
case !typeEqual(firstType, elemType):
|
|
||||||
encPanic(errArrayMixedElementTypes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If we have a nested array, then we must make sure that the nested
|
|
||||||
// array contains ONLY primitives.
|
|
||||||
// This checks arbitrarily nested arrays.
|
|
||||||
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
|
|
||||||
nest := tomlArrayType(eindirect(rv.Index(0)))
|
|
||||||
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
|
|
||||||
encPanic(errArrayNoTable)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return firstType
|
|
||||||
}
|
|
||||||
|
|
||||||
type tagOptions struct {
|
|
||||||
skip bool // "-"
|
|
||||||
name string
|
|
||||||
omitempty bool
|
|
||||||
omitzero bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func getOptions(tag reflect.StructTag) tagOptions {
|
|
||||||
t := tag.Get("toml")
|
|
||||||
if t == "-" {
|
|
||||||
return tagOptions{skip: true}
|
|
||||||
}
|
|
||||||
var opts tagOptions
|
|
||||||
parts := strings.Split(t, ",")
|
|
||||||
opts.name = parts[0]
|
|
||||||
for _, s := range parts[1:] {
|
|
||||||
switch s {
|
|
||||||
case "omitempty":
|
|
||||||
opts.omitempty = true
|
|
||||||
case "omitzero":
|
|
||||||
opts.omitzero = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return opts
|
|
||||||
}
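// Sketch of how these options map onto struct tags; the struct below is
// hypothetical:
//
//	type T struct {
//		A string `toml:"renamed"`        // encoded under the key "renamed"
//		B string `toml:"-"`              // skipped entirely
//		C string `toml:",omitempty"`     // omitted when C == ""
//		D int    `toml:"count,omitzero"` // omitted when D == 0
//	}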
|
|
||||||
|
|
||||||
func isZero(rv reflect.Value) bool {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return rv.Int() == 0
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
return rv.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return rv.Float() == 0.0
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isEmpty(rv reflect.Value) bool {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
|
||||||
return rv.Len() == 0
|
|
||||||
case reflect.Bool:
|
|
||||||
return !rv.Bool()
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) newline() {
|
|
||||||
if enc.hasWritten {
|
|
||||||
enc.wf("\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
|
|
||||||
if len(key) == 0 {
|
|
||||||
encPanic(errNoKey)
|
|
||||||
}
|
|
||||||
panicIfInvalidKey(key)
|
|
||||||
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
|
|
||||||
enc.eElement(val)
|
|
||||||
enc.newline()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
|
||||||
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
|
|
||||||
encPanic(err)
|
|
||||||
}
|
|
||||||
enc.hasWritten = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (enc *Encoder) indentStr(key Key) string {
|
|
||||||
return strings.Repeat(enc.Indent, len(key)-1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func encPanic(err error) {
|
|
||||||
panic(tomlEncodeError{err})
|
|
||||||
}
|
|
||||||
|
|
||||||
func eindirect(v reflect.Value) reflect.Value {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Ptr, reflect.Interface:
|
|
||||||
return eindirect(v.Elem())
|
|
||||||
default:
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func isNil(rv reflect.Value) bool {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
|
||||||
return rv.IsNil()
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func panicIfInvalidKey(key Key) {
|
|
||||||
for _, k := range key {
|
|
||||||
if len(k) == 0 {
|
|
||||||
encPanic(e("Key '%s' is not a valid table name. Key names "+
|
|
||||||
"cannot be empty.", key.maybeQuotedAll()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func isValidKeyName(s string) bool {
|
|
||||||
return len(s) != 0
|
|
||||||
}
@ -1,19 +0,0 @@
// +build go1.2
|
|
||||||
|
|
||||||
package toml
|
|
||||||
|
|
||||||
// In order to support Go 1.1, we define our own TextMarshaler and
|
|
||||||
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
|
|
||||||
// standard library interfaces.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
|
||||||
// so that Go 1.1 can be supported.
|
|
||||||
type TextMarshaler encoding.TextMarshaler
|
|
||||||
|
|
||||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
|
|
||||||
// here so that Go 1.1 can be supported.
|
|
||||||
type TextUnmarshaler encoding.TextUnmarshaler
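// Sketch: any type satisfying TextMarshaler is encoded as a quoted TOML
// string built from its MarshalText output. The level type below is
// hypothetical and assumes "strconv" is imported in its own file.
//
//	type level int
//
//	func (l level) MarshalText() ([]byte, error) {
//		return []byte("level-" + strconv.Itoa(int(l))), nil
//	}
//
//	// Encoding struct{ L level }{3} yields: L = "level-3"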
@ -1,18 +0,0 @@
// +build !go1.2
|
|
||||||
|
|
||||||
package toml
|
|
||||||
|
|
||||||
// These interfaces were introduced in Go 1.2, so we add them manually when
|
|
||||||
// compiling for Go 1.1.
|
|
||||||
|
|
||||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
|
||||||
// so that Go 1.1 can be supported.
|
|
||||||
type TextMarshaler interface {
|
|
||||||
MarshalText() (text []byte, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
|
|
||||||
// here so that Go 1.1 can be supported.
|
|
||||||
type TextUnmarshaler interface {
|
|
||||||
UnmarshalText(text []byte) error
|
|
||||||
}
@ -1,953 +0,0 @@
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
type itemType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
itemError itemType = iota
|
|
||||||
itemNIL // used in the parser to indicate no type
|
|
||||||
itemEOF
|
|
||||||
itemText
|
|
||||||
itemString
|
|
||||||
itemRawString
|
|
||||||
itemMultilineString
|
|
||||||
itemRawMultilineString
|
|
||||||
itemBool
|
|
||||||
itemInteger
|
|
||||||
itemFloat
|
|
||||||
itemDatetime
|
|
||||||
itemArray // the start of an array
|
|
||||||
itemArrayEnd
|
|
||||||
itemTableStart
|
|
||||||
itemTableEnd
|
|
||||||
itemArrayTableStart
|
|
||||||
itemArrayTableEnd
|
|
||||||
itemKeyStart
|
|
||||||
itemCommentStart
|
|
||||||
itemInlineTableStart
|
|
||||||
itemInlineTableEnd
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
eof = 0
|
|
||||||
comma = ','
|
|
||||||
tableStart = '['
|
|
||||||
tableEnd = ']'
|
|
||||||
arrayTableStart = '['
|
|
||||||
arrayTableEnd = ']'
|
|
||||||
tableSep = '.'
|
|
||||||
keySep = '='
|
|
||||||
arrayStart = '['
|
|
||||||
arrayEnd = ']'
|
|
||||||
commentStart = '#'
|
|
||||||
stringStart = '"'
|
|
||||||
stringEnd = '"'
|
|
||||||
rawStringStart = '\''
|
|
||||||
rawStringEnd = '\''
|
|
||||||
inlineTableStart = '{'
|
|
||||||
inlineTableEnd = '}'
|
|
||||||
)
|
|
||||||
|
|
||||||
type stateFn func(lx *lexer) stateFn
|
|
||||||
|
|
||||||
type lexer struct {
|
|
||||||
input string
|
|
||||||
start int
|
|
||||||
pos int
|
|
||||||
line int
|
|
||||||
state stateFn
|
|
||||||
items chan item
|
|
||||||
|
|
||||||
// Allow for backing up up to three runes.
|
|
||||||
// This is necessary because TOML contains 3-rune tokens (""" and ''').
|
|
||||||
prevWidths [3]int
|
|
||||||
nprev int // how many of prevWidths are in use
|
|
||||||
// If we emit an eof, we can still back up, but it is not OK to call
|
|
||||||
// next again.
|
|
||||||
atEOF bool
|
|
||||||
|
|
||||||
// A stack of state functions used to maintain context.
|
|
||||||
// The idea is to reuse parts of the state machine in various places.
|
|
||||||
// For example, values can appear at the top level or within arbitrarily
|
|
||||||
// nested arrays. The last state on the stack is used after a value has
|
|
||||||
// been lexed. Similarly for comments.
|
|
||||||
stack []stateFn
|
|
||||||
}
|
|
||||||
|
|
||||||
type item struct {
|
|
||||||
typ itemType
|
|
||||||
val string
|
|
||||||
line int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) nextItem() item {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case item := <-lx.items:
|
|
||||||
return item
|
|
||||||
default:
|
|
||||||
lx.state = lx.state(lx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func lex(input string) *lexer {
|
|
||||||
lx := &lexer{
|
|
||||||
input: input,
|
|
||||||
state: lexTop,
|
|
||||||
line: 1,
|
|
||||||
items: make(chan item, 10),
|
|
||||||
stack: make([]stateFn, 0, 10),
|
|
||||||
}
|
|
||||||
return lx
|
|
||||||
}
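// Sketch of how the parser drives this lexer; it uses package-internal
// identifiers, so it only compiles inside package toml, and a real caller
// also stops on itemError:
//
//	lx := lex(`answer = 42`)
//	for it := lx.nextItem(); it.typ != itemEOF; it = lx.nextItem() {
//		fmt.Println(it.typ, it.val)
//	}
//	// Emits roughly: KeyStart "", Text "answer", Integer "42"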
|
|
||||||
|
|
||||||
func (lx *lexer) push(state stateFn) {
|
|
||||||
lx.stack = append(lx.stack, state)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) pop() stateFn {
|
|
||||||
if len(lx.stack) == 0 {
|
|
||||||
return lx.errorf("BUG in lexer: no states to pop")
|
|
||||||
}
|
|
||||||
last := lx.stack[len(lx.stack)-1]
|
|
||||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
|
||||||
return last
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) current() string {
|
|
||||||
return lx.input[lx.start:lx.pos]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) emit(typ itemType) {
|
|
||||||
lx.items <- item{typ, lx.current(), lx.line}
|
|
||||||
lx.start = lx.pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) emitTrim(typ itemType) {
|
|
||||||
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
|
|
||||||
lx.start = lx.pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lx *lexer) next() (r rune) {
|
|
||||||
if lx.atEOF {
|
|
||||||
panic("next called after EOF")
|
|
||||||
}
|
|
||||||
if lx.pos >= len(lx.input) {
|
|
||||||
lx.atEOF = true
|
|
||||||
return eof
|
|
||||||
}
|
|
||||||
|
|
||||||
if lx.input[lx.pos] == '\n' {
|
|
||||||
lx.line++
|
|
||||||
}
|
|
||||||
lx.prevWidths[2] = lx.prevWidths[1]
|
|
||||||
lx.prevWidths[1] = lx.prevWidths[0]
|
|
||||||
if lx.nprev < 3 {
|
|
||||||
lx.nprev++
|
|
||||||
}
|
|
||||||
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
|
||||||
lx.prevWidths[0] = w
|
|
||||||
lx.pos += w
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// ignore skips over the pending input before this point.
|
|
||||||
func (lx *lexer) ignore() {
|
|
||||||
lx.start = lx.pos
|
|
||||||
}
|
|
||||||
|
|
||||||
// backup steps back one rune. It can be called up to three times between
// calls to next (matching the three prevWidths slots).
|
|
||||||
func (lx *lexer) backup() {
|
|
||||||
if lx.atEOF {
|
|
||||||
lx.atEOF = false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if lx.nprev < 1 {
|
|
||||||
panic("backed up too far")
|
|
||||||
}
|
|
||||||
w := lx.prevWidths[0]
|
|
||||||
lx.prevWidths[0] = lx.prevWidths[1]
|
|
||||||
lx.prevWidths[1] = lx.prevWidths[2]
|
|
||||||
lx.nprev--
|
|
||||||
lx.pos -= w
|
|
||||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
|
||||||
lx.line--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// accept consumes the next rune if it's equal to `valid`.
|
|
||||||
func (lx *lexer) accept(valid rune) bool {
|
|
||||||
if lx.next() == valid {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// peek returns but does not consume the next rune in the input.
|
|
||||||
func (lx *lexer) peek() rune {
|
|
||||||
r := lx.next()
|
|
||||||
lx.backup()
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// skip ignores all input that matches the given predicate.
|
|
||||||
func (lx *lexer) skip(pred func(rune) bool) {
|
|
||||||
for {
|
|
||||||
r := lx.next()
|
|
||||||
if pred(r) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
lx.ignore()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
|
||||||
// Note that any value that is a character is escaped if it's a special
|
|
||||||
// character (newlines, tabs, etc.).
|
|
||||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
|
||||||
lx.items <- item{
|
|
||||||
itemError,
|
|
||||||
fmt.Sprintf(format, values...),
|
|
||||||
lx.line,
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexTop consumes elements at the top level of TOML data.
|
|
||||||
func lexTop(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
if isWhitespace(r) || isNL(r) {
|
|
||||||
return lexSkip(lx, lexTop)
|
|
||||||
}
|
|
||||||
switch r {
|
|
||||||
case commentStart:
|
|
||||||
lx.push(lexTop)
|
|
||||||
return lexCommentStart
|
|
||||||
case tableStart:
|
|
||||||
return lexTableStart
|
|
||||||
case eof:
|
|
||||||
if lx.pos > lx.start {
|
|
||||||
return lx.errorf("unexpected EOF")
|
|
||||||
}
|
|
||||||
lx.emit(itemEOF)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point, the only valid item can be a key, so we back up
|
|
||||||
// and let the key lexer do the rest.
|
|
||||||
lx.backup()
|
|
||||||
lx.push(lexTopEnd)
|
|
||||||
return lexKeyStart
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
|
|
||||||
// or a table.) It must see only whitespace, and will turn back to lexTop
|
|
||||||
// upon a newline. If it sees EOF, it will quit the lexer successfully.
|
|
||||||
func lexTopEnd(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch {
|
|
||||||
case r == commentStart:
|
|
||||||
// a comment will read to a newline for us.
|
|
||||||
lx.push(lexTop)
|
|
||||||
return lexCommentStart
|
|
||||||
case isWhitespace(r):
|
|
||||||
return lexTopEnd
|
|
||||||
case isNL(r):
|
|
||||||
lx.ignore()
|
|
||||||
return lexTop
|
|
||||||
case r == eof:
|
|
||||||
lx.emit(itemEOF)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return lx.errorf("expected a top-level item to end with a newline, "+
|
|
||||||
"comment, or EOF, but got %q instead", r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
|
|
||||||
// it starts with a character other than '.' and ']'.
|
|
||||||
// It assumes that '[' has already been consumed.
|
|
||||||
// It also handles the case that this is an item in an array of tables.
|
|
||||||
// e.g., '[[name]]'.
|
|
||||||
func lexTableStart(lx *lexer) stateFn {
|
|
||||||
if lx.peek() == arrayTableStart {
|
|
||||||
lx.next()
|
|
||||||
lx.emit(itemArrayTableStart)
|
|
||||||
lx.push(lexArrayTableEnd)
|
|
||||||
} else {
|
|
||||||
lx.emit(itemTableStart)
|
|
||||||
lx.push(lexTableEnd)
|
|
||||||
}
|
|
||||||
return lexTableNameStart
|
|
||||||
}
|
|
||||||
|
|
||||||
func lexTableEnd(lx *lexer) stateFn {
|
|
||||||
lx.emit(itemTableEnd)
|
|
||||||
return lexTopEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
|
||||||
if r := lx.next(); r != arrayTableEnd {
|
|
||||||
return lx.errorf("expected end of table array name delimiter %q, "+
|
|
||||||
"but got %q instead", arrayTableEnd, r)
|
|
||||||
}
|
|
||||||
lx.emit(itemArrayTableEnd)
|
|
||||||
return lexTopEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
func lexTableNameStart(lx *lexer) stateFn {
|
|
||||||
lx.skip(isWhitespace)
|
|
||||||
switch r := lx.peek(); {
|
|
||||||
case r == tableEnd || r == eof:
|
|
||||||
return lx.errorf("unexpected end of table name " +
|
|
||||||
"(table names cannot be empty)")
|
|
||||||
case r == tableSep:
|
|
||||||
return lx.errorf("unexpected table separator " +
|
|
||||||
"(table names cannot be empty)")
|
|
||||||
case r == stringStart || r == rawStringStart:
|
|
||||||
lx.ignore()
|
|
||||||
lx.push(lexTableNameEnd)
|
|
||||||
return lexValue // reuse string lexing
|
|
||||||
default:
|
|
||||||
return lexBareTableName
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexBareTableName lexes the name of a table. It assumes that at least one
|
|
||||||
// valid character for the table has already been read.
|
|
||||||
func lexBareTableName(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
if isBareKeyChar(r) {
|
|
||||||
return lexBareTableName
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemText)
|
|
||||||
return lexTableNameEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexTableNameEnd reads the end of a piece of a table name, optionally
|
|
||||||
// consuming whitespace.
|
|
||||||
func lexTableNameEnd(lx *lexer) stateFn {
|
|
||||||
lx.skip(isWhitespace)
|
|
||||||
switch r := lx.next(); {
|
|
||||||
case isWhitespace(r):
|
|
||||||
return lexTableNameEnd
|
|
||||||
case r == tableSep:
|
|
||||||
lx.ignore()
|
|
||||||
return lexTableNameStart
|
|
||||||
case r == tableEnd:
|
|
||||||
return lx.pop()
|
|
||||||
default:
|
|
||||||
return lx.errorf("expected '.' or ']' to end table name, "+
|
|
||||||
"but got %q instead", r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexKeyStart consumes a key name up until the first non-whitespace character.
|
|
||||||
// lexKeyStart will ignore whitespace.
|
|
||||||
func lexKeyStart(lx *lexer) stateFn {
|
|
||||||
r := lx.peek()
|
|
||||||
switch {
|
|
||||||
case r == keySep:
|
|
||||||
return lx.errorf("unexpected key separator %q", keySep)
|
|
||||||
case isWhitespace(r) || isNL(r):
|
|
||||||
lx.next()
|
|
||||||
return lexSkip(lx, lexKeyStart)
|
|
||||||
case r == stringStart || r == rawStringStart:
|
|
||||||
lx.ignore()
|
|
||||||
lx.emit(itemKeyStart)
|
|
||||||
lx.push(lexKeyEnd)
|
|
||||||
return lexValue // reuse string lexing
|
|
||||||
default:
|
|
||||||
lx.ignore()
|
|
||||||
lx.emit(itemKeyStart)
|
|
||||||
return lexBareKey
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexBareKey consumes the text of a bare key. Assumes that the first character
|
|
||||||
// (which is not whitespace) has not yet been consumed.
|
|
||||||
func lexBareKey(lx *lexer) stateFn {
|
|
||||||
switch r := lx.next(); {
|
|
||||||
case isBareKeyChar(r):
|
|
||||||
return lexBareKey
|
|
||||||
case isWhitespace(r):
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemText)
|
|
||||||
return lexKeyEnd
|
|
||||||
case r == keySep:
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemText)
|
|
||||||
return lexKeyEnd
|
|
||||||
default:
|
|
||||||
return lx.errorf("bare keys cannot contain %q", r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
|
|
||||||
// separator).
|
|
||||||
func lexKeyEnd(lx *lexer) stateFn {
|
|
||||||
switch r := lx.next(); {
|
|
||||||
case r == keySep:
|
|
||||||
return lexSkip(lx, lexValue)
|
|
||||||
case isWhitespace(r):
|
|
||||||
return lexSkip(lx, lexKeyEnd)
|
|
||||||
default:
|
|
||||||
return lx.errorf("expected key separator %q, but got %q instead",
|
|
||||||
keySep, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexValue starts the consumption of a value anywhere a value is expected.
|
|
||||||
// lexValue will ignore whitespace.
|
|
||||||
// After a value is lexed, the last state on the stack is popped and returned.
|
|
||||||
func lexValue(lx *lexer) stateFn {
|
|
||||||
// We allow whitespace to precede a value, but NOT newlines.
|
|
||||||
// In array syntax, the array states are responsible for ignoring newlines.
|
|
||||||
r := lx.next()
|
|
||||||
switch {
|
|
||||||
case isWhitespace(r):
|
|
||||||
return lexSkip(lx, lexValue)
|
|
||||||
case isDigit(r):
|
|
||||||
lx.backup() // avoid an extra state and use the same as above
|
|
||||||
return lexNumberOrDateStart
|
|
||||||
}
|
|
||||||
switch r {
|
|
||||||
case arrayStart:
|
|
||||||
lx.ignore()
|
|
||||||
lx.emit(itemArray)
|
|
||||||
return lexArrayValue
|
|
||||||
case inlineTableStart:
|
|
||||||
lx.ignore()
|
|
||||||
lx.emit(itemInlineTableStart)
|
|
||||||
return lexInlineTableValue
|
|
||||||
case stringStart:
|
|
||||||
if lx.accept(stringStart) {
|
|
||||||
if lx.accept(stringStart) {
|
|
||||||
lx.ignore() // Ignore """
|
|
||||||
return lexMultilineString
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
}
|
|
||||||
lx.ignore() // ignore the '"'
|
|
||||||
return lexString
|
|
||||||
case rawStringStart:
|
|
||||||
if lx.accept(rawStringStart) {
|
|
||||||
if lx.accept(rawStringStart) {
|
|
||||||
lx.ignore() // Ignore """
|
|
||||||
return lexMultilineRawString
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
}
|
|
||||||
lx.ignore() // ignore the "'"
|
|
||||||
return lexRawString
|
|
||||||
case '+', '-':
|
|
||||||
return lexNumberStart
|
|
||||||
case '.': // special error case, be kind to users
|
|
||||||
return lx.errorf("floats must start with a digit, not '.'")
|
|
||||||
}
|
|
||||||
if unicode.IsLetter(r) {
|
|
||||||
// Be permissive here; lexBool will give a nice error if the
|
|
||||||
// user wrote something like
|
|
||||||
// x = foo
|
|
||||||
// (i.e. not 'true' or 'false' but is something else word-like.)
|
|
||||||
lx.backup()
|
|
||||||
return lexBool
|
|
||||||
}
|
|
||||||
return lx.errorf("expected value but found %q instead", r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
|
|
||||||
// have already been consumed. All whitespace and newlines are ignored.
|
|
||||||
func lexArrayValue(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch {
|
|
||||||
case isWhitespace(r) || isNL(r):
|
|
||||||
return lexSkip(lx, lexArrayValue)
|
|
||||||
case r == commentStart:
|
|
||||||
lx.push(lexArrayValue)
|
|
||||||
return lexCommentStart
|
|
||||||
case r == comma:
|
|
||||||
return lx.errorf("unexpected comma")
|
|
||||||
case r == arrayEnd:
|
|
||||||
// NOTE(caleb): The spec isn't clear about whether you can have
|
|
||||||
// a trailing comma or not, so we'll allow it.
|
|
||||||
return lexArrayEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
lx.backup()
|
|
||||||
lx.push(lexArrayValueEnd)
|
|
||||||
return lexValue
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexArrayValueEnd consumes everything between the end of an array value and
|
|
||||||
// the next value (or the end of the array): it ignores whitespace and newlines
|
|
||||||
// and expects either a ',' or a ']'.
|
|
||||||
func lexArrayValueEnd(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch {
|
|
||||||
case isWhitespace(r) || isNL(r):
|
|
||||||
return lexSkip(lx, lexArrayValueEnd)
|
|
||||||
case r == commentStart:
|
|
||||||
lx.push(lexArrayValueEnd)
|
|
||||||
return lexCommentStart
|
|
||||||
case r == comma:
|
|
||||||
lx.ignore()
|
|
||||||
return lexArrayValue // move on to the next value
|
|
||||||
case r == arrayEnd:
|
|
||||||
return lexArrayEnd
|
|
||||||
}
|
|
||||||
return lx.errorf(
|
|
||||||
"expected a comma or array terminator %q, but got %q instead",
|
|
||||||
arrayEnd, r,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexArrayEnd finishes the lexing of an array.
|
|
||||||
// It assumes that a ']' has just been consumed.
|
|
||||||
func lexArrayEnd(lx *lexer) stateFn {
|
|
||||||
lx.ignore()
|
|
||||||
lx.emit(itemArrayEnd)
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexInlineTableValue consumes one key/value pair in an inline table.
|
|
||||||
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
|
|
||||||
func lexInlineTableValue(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch {
|
|
||||||
case isWhitespace(r):
|
|
||||||
return lexSkip(lx, lexInlineTableValue)
|
|
||||||
case isNL(r):
|
|
||||||
return lx.errorf("newlines not allowed within inline tables")
|
|
||||||
case r == commentStart:
|
|
||||||
lx.push(lexInlineTableValue)
|
|
||||||
return lexCommentStart
|
|
||||||
case r == comma:
|
|
||||||
return lx.errorf("unexpected comma")
|
|
||||||
case r == inlineTableEnd:
|
|
||||||
return lexInlineTableEnd
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
lx.push(lexInlineTableValueEnd)
|
|
||||||
return lexKeyStart
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexInlineTableValueEnd consumes everything between the end of an inline table
|
|
||||||
// key/value pair and the next pair (or the end of the table):
|
|
||||||
// it ignores whitespace and expects either a ',' or a '}'.
|
|
||||||
func lexInlineTableValueEnd(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch {
|
|
||||||
case isWhitespace(r):
|
|
||||||
return lexSkip(lx, lexInlineTableValueEnd)
|
|
||||||
case isNL(r):
|
|
||||||
return lx.errorf("newlines not allowed within inline tables")
|
|
||||||
case r == commentStart:
|
|
||||||
lx.push(lexInlineTableValueEnd)
|
|
||||||
return lexCommentStart
|
|
||||||
case r == comma:
|
|
||||||
lx.ignore()
|
|
||||||
return lexInlineTableValue
|
|
||||||
case r == inlineTableEnd:
|
|
||||||
return lexInlineTableEnd
|
|
||||||
}
|
|
||||||
return lx.errorf("expected a comma or an inline table terminator %q, "+
|
|
||||||
"but got %q instead", inlineTableEnd, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexInlineTableEnd finishes the lexing of an inline table.
|
|
||||||
// It assumes that a '}' has just been consumed.
|
|
||||||
func lexInlineTableEnd(lx *lexer) stateFn {
|
|
||||||
lx.ignore()
|
|
||||||
lx.emit(itemInlineTableEnd)
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexString consumes the inner contents of a string. It assumes that the
|
|
||||||
// beginning '"' has already been consumed and ignored.
|
|
||||||
func lexString(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch {
|
|
||||||
case r == eof:
|
|
||||||
return lx.errorf("unexpected EOF")
|
|
||||||
case isNL(r):
|
|
||||||
return lx.errorf("strings cannot contain newlines")
|
|
||||||
case r == '\\':
|
|
||||||
lx.push(lexString)
|
|
||||||
return lexStringEscape
|
|
||||||
case r == stringEnd:
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemString)
|
|
||||||
lx.next()
|
|
||||||
lx.ignore()
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
return lexString
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexMultilineString consumes the inner contents of a string. It assumes that
|
|
||||||
// the beginning '"""' has already been consumed and ignored.
|
|
||||||
func lexMultilineString(lx *lexer) stateFn {
|
|
||||||
switch lx.next() {
|
|
||||||
case eof:
|
|
||||||
return lx.errorf("unexpected EOF")
|
|
||||||
case '\\':
|
|
||||||
return lexMultilineStringEscape
|
|
||||||
case stringEnd:
|
|
||||||
if lx.accept(stringEnd) {
|
|
||||||
if lx.accept(stringEnd) {
|
|
||||||
lx.backup()
|
|
||||||
lx.backup()
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemMultilineString)
|
|
||||||
lx.next()
|
|
||||||
lx.next()
|
|
||||||
lx.next()
|
|
||||||
lx.ignore()
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lexMultilineString
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
|
|
||||||
// It assumes that the beginning "'" has already been consumed and ignored.
|
|
||||||
func lexRawString(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch {
|
|
||||||
case r == eof:
|
|
||||||
return lx.errorf("unexpected EOF")
|
|
||||||
case isNL(r):
|
|
||||||
return lx.errorf("strings cannot contain newlines")
|
|
||||||
case r == rawStringEnd:
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemRawString)
|
|
||||||
lx.next()
|
|
||||||
lx.ignore()
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
return lexRawString
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
|
||||||
// a string. It assumes that the beginning "'''" has already been consumed and
|
|
||||||
// ignored.
|
|
||||||
func lexMultilineRawString(lx *lexer) stateFn {
|
|
||||||
switch lx.next() {
|
|
||||||
case eof:
|
|
||||||
return lx.errorf("unexpected EOF")
|
|
||||||
case rawStringEnd:
|
|
||||||
if lx.accept(rawStringEnd) {
|
|
||||||
if lx.accept(rawStringEnd) {
|
|
||||||
lx.backup()
|
|
||||||
lx.backup()
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemRawMultilineString)
|
|
||||||
lx.next()
|
|
||||||
lx.next()
|
|
||||||
lx.next()
|
|
||||||
lx.ignore()
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lexMultilineRawString
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
|
|
||||||
// preceding '\\' has already been consumed.
|
|
||||||
func lexMultilineStringEscape(lx *lexer) stateFn {
|
|
||||||
// Handle the special case first:
|
|
||||||
if isNL(lx.next()) {
|
|
||||||
return lexMultilineString
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
lx.push(lexMultilineString)
|
|
||||||
return lexStringEscape(lx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func lexStringEscape(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch r {
|
|
||||||
case 'b':
|
|
||||||
fallthrough
|
|
||||||
case 't':
|
|
||||||
fallthrough
|
|
||||||
case 'n':
|
|
||||||
fallthrough
|
|
||||||
case 'f':
|
|
||||||
fallthrough
|
|
||||||
case 'r':
|
|
||||||
fallthrough
|
|
||||||
case '"':
|
|
||||||
fallthrough
|
|
||||||
case '\\':
|
|
||||||
return lx.pop()
|
|
||||||
case 'u':
|
|
||||||
return lexShortUnicodeEscape
|
|
||||||
case 'U':
|
|
||||||
return lexLongUnicodeEscape
|
|
||||||
}
|
|
||||||
return lx.errorf("invalid escape character %q; only the following "+
|
|
||||||
"escape characters are allowed: "+
|
|
||||||
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
|
||||||
var r rune
|
|
||||||
for i := 0; i < 4; i++ {
|
|
||||||
r = lx.next()
|
|
||||||
if !isHexadecimal(r) {
|
|
||||||
return lx.errorf(`expected four hexadecimal digits after '\u', `+
|
|
||||||
"but got %q instead", lx.current())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
|
|
||||||
func lexLongUnicodeEscape(lx *lexer) stateFn {
|
|
||||||
var r rune
|
|
||||||
for i := 0; i < 8; i++ {
|
|
||||||
r = lx.next()
|
|
||||||
if !isHexadecimal(r) {
|
|
||||||
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
|
|
||||||
"but got %q instead", lx.current())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
|
|
||||||
func lexNumberOrDateStart(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
if isDigit(r) {
|
|
||||||
return lexNumberOrDate
|
|
||||||
}
|
|
||||||
switch r {
|
|
||||||
case '_':
|
|
||||||
return lexNumber
|
|
||||||
case 'e', 'E':
|
|
||||||
return lexFloat
|
|
||||||
case '.':
|
|
||||||
return lx.errorf("floats must start with a digit, not '.'")
|
|
||||||
}
|
|
||||||
return lx.errorf("expected a digit but got %q", r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexNumberOrDate consumes either an integer, float or datetime.
|
|
||||||
func lexNumberOrDate(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
if isDigit(r) {
|
|
||||||
return lexNumberOrDate
|
|
||||||
}
|
|
||||||
switch r {
|
|
||||||
case '-':
|
|
||||||
return lexDatetime
|
|
||||||
case '_':
|
|
||||||
return lexNumber
|
|
||||||
case '.', 'e', 'E':
|
|
||||||
return lexFloat
|
|
||||||
}
|
|
||||||
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemInteger)
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexDatetime consumes a Datetime, to a first approximation.
|
|
||||||
// The parser validates that it matches one of the accepted formats.
|
|
||||||
func lexDatetime(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
if isDigit(r) {
|
|
||||||
return lexDatetime
|
|
||||||
}
|
|
||||||
switch r {
|
|
||||||
case '-', 'T', ':', '.', 'Z', '+':
|
|
||||||
return lexDatetime
|
|
||||||
}
|
|
||||||
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemDatetime)
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexNumberStart consumes either an integer or a float. It assumes that a sign
|
|
||||||
// has already been read, but that *no* digits have been consumed.
|
|
||||||
// lexNumberStart will move to the appropriate integer or float states.
|
|
||||||
func lexNumberStart(lx *lexer) stateFn {
|
|
||||||
// We MUST see a digit. Even floats have to start with a digit.
|
|
||||||
r := lx.next()
|
|
||||||
if !isDigit(r) {
|
|
||||||
if r == '.' {
|
|
||||||
return lx.errorf("floats must start with a digit, not '.'")
|
|
||||||
}
|
|
||||||
return lx.errorf("expected a digit but got %q", r)
|
|
||||||
}
|
|
||||||
return lexNumber
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexNumber consumes an integer or a float after seeing the first digit.
|
|
||||||
func lexNumber(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
if isDigit(r) {
|
|
||||||
return lexNumber
|
|
||||||
}
|
|
||||||
switch r {
|
|
||||||
case '_':
|
|
||||||
return lexNumber
|
|
||||||
case '.', 'e', 'E':
|
|
||||||
return lexFloat
|
|
||||||
}
|
|
||||||
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemInteger)
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexFloat consumes the elements of a float. It allows any sequence of
|
|
||||||
// float-like characters, so floats emitted by the lexer are only a first
|
|
||||||
// approximation and must be validated by the parser.
|
|
||||||
func lexFloat(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
if isDigit(r) {
|
|
||||||
return lexFloat
|
|
||||||
}
|
|
||||||
switch r {
|
|
||||||
case '_', '.', '-', '+', 'e', 'E':
|
|
||||||
return lexFloat
|
|
||||||
}
|
|
||||||
|
|
||||||
lx.backup()
|
|
||||||
lx.emit(itemFloat)
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexBool consumes a bool string: 'true' or 'false'.
|
|
||||||
func lexBool(lx *lexer) stateFn {
|
|
||||||
var rs []rune
|
|
||||||
for {
|
|
||||||
r := lx.next()
|
|
||||||
if !unicode.IsLetter(r) {
|
|
||||||
lx.backup()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
rs = append(rs, r)
|
|
||||||
}
|
|
||||||
s := string(rs)
|
|
||||||
switch s {
|
|
||||||
case "true", "false":
|
|
||||||
lx.emit(itemBool)
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
return lx.errorf("expected value but found %q instead", s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexCommentStart begins the lexing of a comment. It will emit
|
|
||||||
// itemCommentStart and consume no characters, passing control to lexComment.
|
|
||||||
func lexCommentStart(lx *lexer) stateFn {
|
|
||||||
lx.ignore()
|
|
||||||
lx.emit(itemCommentStart)
|
|
||||||
return lexComment
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
|
||||||
// It will consume *up to* the first newline character, and pass control
|
|
||||||
// back to the last state on the stack.
|
|
||||||
func lexComment(lx *lexer) stateFn {
|
|
||||||
r := lx.peek()
|
|
||||||
if isNL(r) || r == eof {
|
|
||||||
lx.emit(itemText)
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
lx.next()
|
|
||||||
return lexComment
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexSkip ignores all slurped input and moves on to the next state.
|
|
||||||
func lexSkip(lx *lexer, nextState stateFn) stateFn {
|
|
||||||
return func(lx *lexer) stateFn {
|
|
||||||
lx.ignore()
|
|
||||||
return nextState
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// isWhitespace returns true if `r` is a whitespace character according
|
|
||||||
// to the spec.
|
|
||||||
func isWhitespace(r rune) bool {
|
|
||||||
return r == '\t' || r == ' '
|
|
||||||
}
|
|
||||||
|
|
||||||
func isNL(r rune) bool {
|
|
||||||
return r == '\n' || r == '\r'
|
|
||||||
}
|
|
||||||
|
|
||||||
func isDigit(r rune) bool {
|
|
||||||
return r >= '0' && r <= '9'
|
|
||||||
}
|
|
||||||
|
|
||||||
func isHexadecimal(r rune) bool {
|
|
||||||
return (r >= '0' && r <= '9') ||
|
|
||||||
(r >= 'a' && r <= 'f') ||
|
|
||||||
(r >= 'A' && r <= 'F')
|
|
||||||
}
|
|
||||||
|
|
||||||
func isBareKeyChar(r rune) bool {
|
|
||||||
return (r >= 'A' && r <= 'Z') ||
|
|
||||||
(r >= 'a' && r <= 'z') ||
|
|
||||||
(r >= '0' && r <= '9') ||
|
|
||||||
r == '_' ||
|
|
||||||
r == '-'
|
|
||||||
}
|
|
||||||
|
|
||||||
func (itype itemType) String() string {
|
|
||||||
switch itype {
|
|
||||||
case itemError:
|
|
||||||
return "Error"
|
|
||||||
case itemNIL:
|
|
||||||
return "NIL"
|
|
||||||
case itemEOF:
|
|
||||||
return "EOF"
|
|
||||||
case itemText:
|
|
||||||
return "Text"
|
|
||||||
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
|
|
||||||
return "String"
|
|
||||||
case itemBool:
|
|
||||||
return "Bool"
|
|
||||||
case itemInteger:
|
|
||||||
return "Integer"
|
|
||||||
case itemFloat:
|
|
||||||
return "Float"
|
|
||||||
case itemDatetime:
|
|
||||||
return "DateTime"
|
|
||||||
case itemTableStart:
|
|
||||||
return "TableStart"
|
|
||||||
case itemTableEnd:
|
|
||||||
return "TableEnd"
|
|
||||||
case itemKeyStart:
|
|
||||||
return "KeyStart"
|
|
||||||
case itemArray:
|
|
||||||
return "Array"
|
|
||||||
case itemArrayEnd:
|
|
||||||
return "ArrayEnd"
|
|
||||||
case itemCommentStart:
|
|
||||||
return "CommentStart"
|
|
||||||
}
|
|
||||||
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (item item) String() string {
|
|
||||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
|
||||||
}
@ -1,592 +0,0 @@
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
type parser struct {
|
|
||||||
mapping map[string]interface{}
|
|
||||||
types map[string]tomlType
|
|
||||||
lx *lexer
|
|
||||||
|
|
||||||
// A list of keys in the order that they appear in the TOML data.
|
|
||||||
ordered []Key
|
|
||||||
|
|
||||||
// the full key for the current hash in scope
|
|
||||||
context Key
|
|
||||||
|
|
||||||
// the base key name for everything except hashes
|
|
||||||
currentKey string
|
|
||||||
|
|
||||||
// rough approximation of line number
|
|
||||||
approxLine int
|
|
||||||
|
|
||||||
// A map of 'key.group.names' to whether they were created implicitly.
|
|
||||||
implicits map[string]bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type parseError string
|
|
||||||
|
|
||||||
func (pe parseError) Error() string {
|
|
||||||
return string(pe)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parse(data string) (p *parser, err error) {
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
var ok bool
|
|
||||||
if err, ok = r.(parseError); ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
panic(r)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
p = &parser{
|
|
||||||
mapping: make(map[string]interface{}),
|
|
||||||
types: make(map[string]tomlType),
|
|
||||||
lx: lex(data),
|
|
||||||
ordered: make([]Key, 0),
|
|
||||||
implicits: make(map[string]bool),
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
item := p.next()
|
|
||||||
if item.typ == itemEOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
p.topLevel(item)
|
|
||||||
}
|
|
||||||
|
|
||||||
return p, nil
|
|
||||||
}
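// Illustrative sketch of what a successful parse yields for a small document;
// the exact shape of p.mapping is an internal detail:
//
//	p, err := parse("[server]\nhost = \"example.com\"\n")
//	if err == nil {
//		server := p.mapping["server"].(map[string]interface{})
//		fmt.Println(server["host"]) // example.com
//	}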
|
|
||||||
|
|
||||||
func (p *parser) panicf(format string, v ...interface{}) {
|
|
||||||
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
|
|
||||||
p.approxLine, p.current(), fmt.Sprintf(format, v...))
|
|
||||||
panic(parseError(msg))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) next() item {
|
|
||||||
it := p.lx.nextItem()
|
|
||||||
if it.typ == itemError {
|
|
||||||
p.panicf("%s", it.val)
|
|
||||||
}
|
|
||||||
return it
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) bug(format string, v ...interface{}) {
|
|
||||||
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) expect(typ itemType) item {
|
|
||||||
it := p.next()
|
|
||||||
p.assertEqual(typ, it.typ)
|
|
||||||
return it
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) assertEqual(expected, got itemType) {
|
|
||||||
if expected != got {
|
|
||||||
p.bug("Expected '%s' but got '%s'.", expected, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) topLevel(item item) {
|
|
||||||
switch item.typ {
|
|
||||||
case itemCommentStart:
|
|
||||||
p.approxLine = item.line
|
|
||||||
p.expect(itemText)
|
|
||||||
case itemTableStart:
|
|
||||||
kg := p.next()
|
|
||||||
p.approxLine = kg.line
|
|
||||||
|
|
||||||
var key Key
|
|
||||||
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
|
|
||||||
key = append(key, p.keyString(kg))
|
|
||||||
}
|
|
||||||
p.assertEqual(itemTableEnd, kg.typ)
|
|
||||||
|
|
||||||
p.establishContext(key, false)
|
|
||||||
p.setType("", tomlHash)
|
|
||||||
p.ordered = append(p.ordered, key)
|
|
||||||
case itemArrayTableStart:
|
|
||||||
kg := p.next()
|
|
||||||
p.approxLine = kg.line
|
|
||||||
|
|
||||||
var key Key
|
|
||||||
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
|
|
||||||
key = append(key, p.keyString(kg))
|
|
||||||
}
|
|
||||||
p.assertEqual(itemArrayTableEnd, kg.typ)
|
|
||||||
|
|
||||||
p.establishContext(key, true)
|
|
||||||
p.setType("", tomlArrayHash)
|
|
||||||
p.ordered = append(p.ordered, key)
|
|
||||||
case itemKeyStart:
|
|
||||||
kname := p.next()
|
|
||||||
p.approxLine = kname.line
|
|
||||||
p.currentKey = p.keyString(kname)
|
|
||||||
|
|
||||||
val, typ := p.value(p.next())
|
|
||||||
p.setValue(p.currentKey, val)
|
|
||||||
p.setType(p.currentKey, typ)
|
|
||||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
|
||||||
p.currentKey = ""
|
|
||||||
default:
|
|
||||||
p.bug("Unexpected type at top level: %s", item.typ)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets a string for a key (or part of a key in a table name).
|
|
||||||
func (p *parser) keyString(it item) string {
|
|
||||||
switch it.typ {
|
|
||||||
case itemText:
|
|
||||||
return it.val
|
|
||||||
case itemString, itemMultilineString,
|
|
||||||
itemRawString, itemRawMultilineString:
|
|
||||||
s, _ := p.value(it)
|
|
||||||
return s.(string)
|
|
||||||
default:
|
|
||||||
p.bug("Unexpected key type: %s", it.typ)
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// value translates an expected value from the lexer into a Go value wrapped
|
|
||||||
// as an empty interface.
|
|
||||||
func (p *parser) value(it item) (interface{}, tomlType) {
|
|
||||||
switch it.typ {
|
|
||||||
case itemString:
|
|
||||||
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
|
|
||||||
case itemMultilineString:
|
|
||||||
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
|
|
||||||
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
|
|
||||||
case itemRawString:
|
|
||||||
return it.val, p.typeOfPrimitive(it)
|
|
||||||
case itemRawMultilineString:
|
|
||||||
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
|
|
||||||
case itemBool:
|
|
||||||
switch it.val {
|
|
||||||
case "true":
|
|
||||||
return true, p.typeOfPrimitive(it)
|
|
||||||
case "false":
|
|
||||||
return false, p.typeOfPrimitive(it)
|
|
||||||
}
|
|
||||||
p.bug("Expected boolean value, but got '%s'.", it.val)
|
|
||||||
case itemInteger:
|
|
||||||
if !numUnderscoresOK(it.val) {
|
|
||||||
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
|
|
||||||
it.val)
|
|
||||||
}
|
|
||||||
val := strings.Replace(it.val, "_", "", -1)
|
|
||||||
num, err := strconv.ParseInt(val, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
// Distinguish integer values. Normally, it'd be a bug if the lexer
|
|
||||||
// provides an invalid integer, but it's possible that the number is
|
|
||||||
// out of range of valid values (which the lexer cannot determine).
|
|
||||||
// So mark the former as a bug but the latter as a legitimate user
|
|
||||||
// error.
|
|
||||||
if e, ok := err.(*strconv.NumError); ok &&
|
|
||||||
e.Err == strconv.ErrRange {
|
|
||||||
|
|
||||||
p.panicf("Integer '%s' is out of the range of 64-bit "+
|
|
||||||
"signed integers.", it.val)
|
|
||||||
} else {
|
|
||||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return num, p.typeOfPrimitive(it)
|
|
||||||
case itemFloat:
|
|
||||||
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
|
||||||
switch r {
|
|
||||||
case '.', 'e', 'E':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
for _, part := range parts {
|
|
||||||
if !numUnderscoresOK(part) {
|
|
||||||
p.panicf("Invalid float %q: underscores must be "+
|
|
||||||
"surrounded by digits", it.val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !numPeriodsOK(it.val) {
|
|
||||||
// As a special case, numbers like '123.' or '1.e2',
|
|
||||||
// which are valid as far as Go/strconv are concerned,
|
|
||||||
// must be rejected because TOML says that a fractional
|
|
||||||
// part consists of '.' followed by 1+ digits.
|
|
||||||
p.panicf("Invalid float %q: '.' must be followed "+
|
|
||||||
"by one or more digits", it.val)
|
|
||||||
}
|
|
||||||
val := strings.Replace(it.val, "_", "", -1)
|
|
||||||
num, err := strconv.ParseFloat(val, 64)
|
|
||||||
if err != nil {
|
|
||||||
if e, ok := err.(*strconv.NumError); ok &&
|
|
||||||
e.Err == strconv.ErrRange {
|
|
||||||
|
|
||||||
p.panicf("Float '%s' is out of the range of 64-bit "+
|
|
||||||
"IEEE-754 floating-point numbers.", it.val)
|
|
||||||
} else {
|
|
||||||
p.panicf("Invalid float value: %q", it.val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return num, p.typeOfPrimitive(it)
|
|
||||||
case itemDatetime:
|
|
||||||
var t time.Time
|
|
||||||
var ok bool
|
|
||||||
var err error
|
|
||||||
for _, format := range []string{
|
|
||||||
"2006-01-02T15:04:05Z07:00",
|
|
||||||
"2006-01-02T15:04:05",
|
|
||||||
"2006-01-02",
|
|
||||||
} {
|
|
||||||
t, err = time.ParseInLocation(format, it.val, time.Local)
|
|
||||||
if err == nil {
|
|
||||||
ok = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !ok {
|
|
||||||
p.panicf("Invalid TOML Datetime: %q.", it.val)
|
|
||||||
}
|
|
||||||
return t, p.typeOfPrimitive(it)
|
|
||||||
case itemArray:
|
|
||||||
array := make([]interface{}, 0)
|
|
||||||
types := make([]tomlType, 0)
|
|
||||||
|
|
||||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
|
||||||
if it.typ == itemCommentStart {
|
|
||||||
p.expect(itemText)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
val, typ := p.value(it)
|
|
||||||
array = append(array, val)
|
|
||||||
types = append(types, typ)
|
|
||||||
}
|
|
||||||
return array, p.typeOfArray(types)
|
|
||||||
case itemInlineTableStart:
|
|
||||||
var (
|
|
||||||
hash = make(map[string]interface{})
|
|
||||||
outerContext = p.context
|
|
||||||
outerKey = p.currentKey
|
|
||||||
)
|
|
||||||
|
|
||||||
p.context = append(p.context, p.currentKey)
|
|
||||||
p.currentKey = ""
|
|
||||||
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
|
|
||||||
if it.typ != itemKeyStart {
|
|
||||||
p.bug("Expected key start but instead found %q, around line %d",
|
|
||||||
it.val, p.approxLine)
|
|
||||||
}
|
|
||||||
if it.typ == itemCommentStart {
|
|
||||||
p.expect(itemText)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// retrieve key
|
|
||||||
k := p.next()
|
|
||||||
p.approxLine = k.line
|
|
||||||
kname := p.keyString(k)
|
|
||||||
|
|
||||||
// retrieve value
|
|
||||||
p.currentKey = kname
|
|
||||||
val, typ := p.value(p.next())
|
|
||||||
// make sure we keep metadata up to date
|
|
||||||
p.setType(kname, typ)
|
|
||||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
|
||||||
hash[kname] = val
|
|
||||||
}
|
|
||||||
p.context = outerContext
|
|
||||||
p.currentKey = outerKey
|
|
||||||
return hash, tomlHash
|
|
||||||
}
|
|
||||||
p.bug("Unexpected value type: %s", it.typ)
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
	accept := false
	for _, r := range s {
		if r == '_' {
			if !accept {
				return false
			}
			accept = false
			continue
		}
		accept = true
	}
	return accept
}

// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
	period := false
	for _, r := range s {
		if period && !isDigit(r) {
			return false
		}
		period = r == '.'
	}
	return !period
}
|
|
||||||
|
|
||||||
// establishContext sets the current context of the parser,
|
|
||||||
// where the context is either a hash or an array of hashes. Which one is
|
|
||||||
// set depends on the value of the `array` parameter.
|
|
||||||
//
|
|
||||||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
|
||||||
// will create implicit hashes automatically.
|
|
||||||
func (p *parser) establishContext(key Key, array bool) {
|
|
||||||
var ok bool
|
|
||||||
|
|
||||||
// Always start at the top level and drill down for our context.
|
|
||||||
hashContext := p.mapping
|
|
||||||
keyContext := make(Key, 0)
|
|
||||||
|
|
||||||
// We only need implicit hashes for key[0:-1]
|
|
||||||
for _, k := range key[0 : len(key)-1] {
|
|
||||||
_, ok = hashContext[k]
|
|
||||||
keyContext = append(keyContext, k)
|
|
||||||
|
|
||||||
// No key? Make an implicit hash and move on.
|
|
||||||
if !ok {
|
|
||||||
p.addImplicit(keyContext)
|
|
||||||
hashContext[k] = make(map[string]interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the hash context is actually an array of tables, then set
|
|
||||||
// the hash context to the last element in that array.
|
|
||||||
//
|
|
||||||
// Otherwise, it better be a table, since this MUST be a key group (by
|
|
||||||
// virtue of it not being the last element in a key).
|
|
||||||
switch t := hashContext[k].(type) {
|
|
||||||
case []map[string]interface{}:
|
|
||||||
hashContext = t[len(t)-1]
|
|
||||||
case map[string]interface{}:
|
|
||||||
hashContext = t
|
|
||||||
default:
|
|
||||||
p.panicf("Key '%s' was already created as a hash.", keyContext)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p.context = keyContext
|
|
||||||
if array {
|
|
||||||
// If this is the first element for this array, then allocate a new
|
|
||||||
// list of tables for it.
|
|
||||||
k := key[len(key)-1]
|
|
||||||
if _, ok := hashContext[k]; !ok {
|
|
||||||
hashContext[k] = make([]map[string]interface{}, 0, 5)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add a new table. But make sure the key hasn't already been used
|
|
||||||
// for something else.
|
|
||||||
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
|
|
||||||
hashContext[k] = append(hash, make(map[string]interface{}))
|
|
||||||
} else {
|
|
||||||
p.panicf("Key '%s' was already created and cannot be used as "+
|
|
||||||
"an array.", keyContext)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
p.setValue(key[len(key)-1], make(map[string]interface{}))
|
|
||||||
}
|
|
||||||
p.context = append(p.context, key[len(key)-1])
|
|
||||||
}
|
|
||||||
|
|
||||||
// setValue sets the given key to the given value in the current context.
|
|
||||||
// It will make sure that the key hasn't already been defined, accounting for
|
|
||||||
// implicit key groups.
|
|
||||||
func (p *parser) setValue(key string, value interface{}) {
|
|
||||||
var tmpHash interface{}
|
|
||||||
var ok bool
|
|
||||||
|
|
||||||
hash := p.mapping
|
|
||||||
keyContext := make(Key, 0)
|
|
||||||
for _, k := range p.context {
|
|
||||||
keyContext = append(keyContext, k)
|
|
||||||
if tmpHash, ok = hash[k]; !ok {
|
|
||||||
p.bug("Context for key '%s' has not been established.", keyContext)
|
|
||||||
}
|
|
||||||
switch t := tmpHash.(type) {
|
|
||||||
case []map[string]interface{}:
|
|
||||||
// The context is a table of hashes. Pick the most recent table
|
|
||||||
// defined as the current hash.
|
|
||||||
hash = t[len(t)-1]
|
|
||||||
case map[string]interface{}:
|
|
||||||
hash = t
|
|
||||||
default:
|
|
||||||
p.bug("Expected hash to have type 'map[string]interface{}', but "+
|
|
||||||
"it has '%T' instead.", tmpHash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
keyContext = append(keyContext, key)
|
|
||||||
|
|
||||||
if _, ok := hash[key]; ok {
|
|
||||||
// Typically, if the given key has already been set, then we have
|
|
||||||
// to raise an error since duplicate keys are disallowed. However,
|
|
||||||
// it's possible that a key was previously defined implicitly. In this
|
|
||||||
// case, it is allowed to be redefined concretely. (See the
|
|
||||||
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
|
|
||||||
//
|
|
||||||
// But we have to make sure to stop marking it as an implicit. (So that
|
|
||||||
// another redefinition provokes an error.)
|
|
||||||
//
|
|
||||||
// Note that since it has already been defined (as a hash), we don't
|
|
||||||
// want to overwrite it. So our business is done.
|
|
||||||
if p.isImplicit(keyContext) {
|
|
||||||
p.removeImplicit(keyContext)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise, we have a concrete key trying to override a previous
|
|
||||||
// key, which is *always* wrong.
|
|
||||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
|
||||||
}
|
|
||||||
hash[key] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
// setType sets the type of a particular value at a given key.
|
|
||||||
// It should be called immediately AFTER setValue.
|
|
||||||
//
|
|
||||||
// Note that if `key` is empty, then the type given will be applied to the
|
|
||||||
// current context (which is either a table or an array of tables).
|
|
||||||
func (p *parser) setType(key string, typ tomlType) {
|
|
||||||
keyContext := make(Key, 0, len(p.context)+1)
|
|
||||||
for _, k := range p.context {
|
|
||||||
keyContext = append(keyContext, k)
|
|
||||||
}
|
|
||||||
if len(key) > 0 { // allow type setting for hashes
|
|
||||||
keyContext = append(keyContext, key)
|
|
||||||
}
|
|
||||||
p.types[keyContext.String()] = typ
|
|
||||||
}
|
|
||||||
|
|
||||||
// addImplicit sets the given Key as having been created implicitly.
|
|
||||||
func (p *parser) addImplicit(key Key) {
|
|
||||||
p.implicits[key.String()] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeImplicit stops tagging the given key as having been implicitly
|
|
||||||
// created.
|
|
||||||
func (p *parser) removeImplicit(key Key) {
|
|
||||||
p.implicits[key.String()] = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// isImplicit returns true if the key group pointed to by the key was created
|
|
||||||
// implicitly.
|
|
||||||
func (p *parser) isImplicit(key Key) bool {
|
|
||||||
return p.implicits[key.String()]
|
|
||||||
}
|
|
||||||
|
|
||||||
// current returns the full key name of the current context.
|
|
||||||
func (p *parser) current() string {
|
|
||||||
if len(p.currentKey) == 0 {
|
|
||||||
return p.context.String()
|
|
||||||
}
|
|
||||||
if len(p.context) == 0 {
|
|
||||||
return p.currentKey
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
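// stripFirstNewline strips a single leading newline from s, if present.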
func stripFirstNewline(s string) string {
|
|
||||||
if len(s) == 0 || s[0] != '\n' {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return s[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
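// stripEscapedWhitespace removes any whitespace that follows a
// backslash-escaped newline, joining the surrounding pieces back together.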
func stripEscapedWhitespace(s string) string {
|
|
||||||
esc := strings.Split(s, "\\\n")
|
|
||||||
if len(esc) > 1 {
|
|
||||||
for i := 1; i < len(esc); i++ {
|
|
||||||
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.Join(esc, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
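// replaceEscapes expands the escape sequences permitted in TOML basic
// strings (\b, \t, \n, \f, \r, \", \\, \uXXXX and \UXXXXXXXX).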
func (p *parser) replaceEscapes(str string) string {
|
|
||||||
var replaced []rune
|
|
||||||
s := []byte(str)
|
|
||||||
r := 0
|
|
||||||
for r < len(s) {
|
|
||||||
if s[r] != '\\' {
|
|
||||||
c, size := utf8.DecodeRune(s[r:])
|
|
||||||
r += size
|
|
||||||
replaced = append(replaced, c)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
r += 1
|
|
||||||
if r >= len(s) {
|
|
||||||
p.bug("Escape sequence at end of string.")
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
switch s[r] {
|
|
||||||
default:
|
|
||||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
|
||||||
return ""
|
|
||||||
case 'b':
|
|
||||||
replaced = append(replaced, rune(0x0008))
|
|
||||||
r += 1
|
|
||||||
case 't':
|
|
||||||
replaced = append(replaced, rune(0x0009))
|
|
||||||
r += 1
|
|
||||||
case 'n':
|
|
||||||
replaced = append(replaced, rune(0x000A))
|
|
||||||
r += 1
|
|
||||||
case 'f':
|
|
||||||
replaced = append(replaced, rune(0x000C))
|
|
||||||
r += 1
|
|
||||||
case 'r':
|
|
||||||
replaced = append(replaced, rune(0x000D))
|
|
||||||
r += 1
|
|
||||||
case '"':
|
|
||||||
replaced = append(replaced, rune(0x0022))
|
|
||||||
r += 1
|
|
||||||
case '\\':
|
|
||||||
replaced = append(replaced, rune(0x005C))
|
|
||||||
r += 1
|
|
||||||
case 'u':
|
|
||||||
// At this point, we know we have a Unicode escape of the form
|
|
||||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
|
||||||
// for us.)
|
|
||||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
|
|
||||||
replaced = append(replaced, escaped)
|
|
||||||
r += 5
|
|
||||||
case 'U':
|
|
||||||
// At this point, we know we have a Unicode escape of the form
|
|
||||||
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
|
|
||||||
// for us.)
|
|
||||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
|
|
||||||
replaced = append(replaced, escaped)
|
|
||||||
r += 9
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(replaced)
|
|
||||||
}
|
|
||||||
|
|
||||||
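// asciiEscapeToUnicode converts the hex digits of a \u or \U escape into the
// rune they encode, rejecting values that do not form a valid rune.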
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
|
|
||||||
s := string(bs)
|
|
||||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
|
||||||
if err != nil {
|
|
||||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
|
||||||
"lexer claims it's OK: %s", s, err)
|
|
||||||
}
|
|
||||||
if !utf8.ValidRune(rune(hex)) {
|
|
||||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
|
||||||
}
|
|
||||||
return rune(hex)
|
|
||||||
}
|
|
||||||
|
|
||||||
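// isStringType reports whether ty is one of the lexer's string item types.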
func isStringType(ty itemType) bool {
|
|
||||||
return ty == itemString || ty == itemMultilineString ||
|
|
||||||
ty == itemRawString || ty == itemRawMultilineString
|
|
||||||
}
|
|
|
@ -1,91 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
// tomlType represents any Go type that corresponds to a TOML type.
|
|
||||||
// While the first draft of the TOML spec has a simplistic type system that
|
|
||||||
// probably doesn't need this level of sophistication, we seem to be militating
|
|
||||||
// toward adding real composite types.
|
|
||||||
type tomlType interface {
|
|
||||||
typeString() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeEqual accepts any two types and returns true if they are equal.
|
|
||||||
func typeEqual(t1, t2 tomlType) bool {
|
|
||||||
if t1 == nil || t2 == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return t1.typeString() == t2.typeString()
|
|
||||||
}
|
|
||||||
|
|
||||||
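// typeIsHash reports whether t is a table (Hash) or an array of tables
// (ArrayHash).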
func typeIsHash(t tomlType) bool {
|
|
||||||
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
type tomlBaseType string
|
|
||||||
|
|
||||||
func (btype tomlBaseType) typeString() string {
|
|
||||||
return string(btype)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (btype tomlBaseType) String() string {
|
|
||||||
return btype.typeString()
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
tomlInteger tomlBaseType = "Integer"
|
|
||||||
tomlFloat tomlBaseType = "Float"
|
|
||||||
tomlDatetime tomlBaseType = "Datetime"
|
|
||||||
tomlString tomlBaseType = "String"
|
|
||||||
tomlBool tomlBaseType = "Bool"
|
|
||||||
tomlArray tomlBaseType = "Array"
|
|
||||||
tomlHash tomlBaseType = "Hash"
|
|
||||||
tomlArrayHash tomlBaseType = "ArrayHash"
|
|
||||||
)
|
|
||||||
|
|
||||||
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
|
|
||||||
// Primitive values are: Integer, Float, Datetime, String and Bool.
|
|
||||||
//
|
|
||||||
// Passing a lexer item other than the following will cause a BUG message
|
|
||||||
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
|
|
||||||
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
|
||||||
switch lexItem.typ {
|
|
||||||
case itemInteger:
|
|
||||||
return tomlInteger
|
|
||||||
case itemFloat:
|
|
||||||
return tomlFloat
|
|
||||||
case itemDatetime:
|
|
||||||
return tomlDatetime
|
|
||||||
case itemString:
|
|
||||||
return tomlString
|
|
||||||
case itemMultilineString:
|
|
||||||
return tomlString
|
|
||||||
case itemRawString:
|
|
||||||
return tomlString
|
|
||||||
case itemRawMultilineString:
|
|
||||||
return tomlString
|
|
||||||
case itemBool:
|
|
||||||
return tomlBool
|
|
||||||
}
|
|
||||||
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeOfArray returns a tomlType for an array given a list of types of its
|
|
||||||
// values.
|
|
||||||
//
|
|
||||||
// In the current spec, if an array is homogeneous, then its type is always
|
|
||||||
// "Array". If the array is not homogeneous, an error is generated.
|
|
||||||
func (p *parser) typeOfArray(types []tomlType) tomlType {
|
|
||||||
// Empty arrays are cool.
|
|
||||||
if len(types) == 0 {
|
|
||||||
return tomlArray
|
|
||||||
}
|
|
||||||
|
|
||||||
theType := types[0]
|
|
||||||
for _, t := range types[1:] {
|
|
||||||
if !typeEqual(theType, t) {
|
|
||||||
p.panicf("Array contains values of type '%s' and '%s', but "+
|
|
||||||
"arrays must be homogeneous.", theType, t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tomlArray
|
|
||||||
}
|
|
|
@ -1,242 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
// Struct field handling is adapted from code in encoding/json:
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the Go distribution.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A field represents a single field found in a struct.
|
|
||||||
type field struct {
|
|
||||||
name string // the name of the field (`toml` tag included)
|
|
||||||
tag bool // whether field has a `toml` tag
|
|
||||||
index []int // represents the depth of an anonymous field
|
|
||||||
typ reflect.Type // the type of the field
|
|
||||||
}
|
|
||||||
|
|
||||||
// byName sorts field by name, breaking ties with depth,
|
|
||||||
// then breaking ties with "name came from toml tag", then
|
|
||||||
// breaking ties with index sequence.
|
|
||||||
type byName []field
|
|
||||||
|
|
||||||
func (x byName) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byName) Less(i, j int) bool {
|
|
||||||
if x[i].name != x[j].name {
|
|
||||||
return x[i].name < x[j].name
|
|
||||||
}
|
|
||||||
if len(x[i].index) != len(x[j].index) {
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
if x[i].tag != x[j].tag {
|
|
||||||
return x[i].tag
|
|
||||||
}
|
|
||||||
return byIndex(x).Less(i, j)
|
|
||||||
}
|
|
||||||
|
|
||||||
// byIndex sorts field by index sequence.
|
|
||||||
type byIndex []field
|
|
||||||
|
|
||||||
func (x byIndex) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byIndex) Less(i, j int) bool {
|
|
||||||
for k, xik := range x[i].index {
|
|
||||||
if k >= len(x[j].index) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if xik != x[j].index[k] {
|
|
||||||
return xik < x[j].index[k]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeFields returns a list of fields that TOML should recognize for the given
|
|
||||||
// type. The algorithm is breadth-first search over the set of structs to
|
|
||||||
// include - the top struct and then any reachable anonymous structs.
|
|
||||||
func typeFields(t reflect.Type) []field {
|
|
||||||
// Anonymous fields to explore at the current level and the next.
|
|
||||||
current := []field{}
|
|
||||||
next := []field{{typ: t}}
|
|
||||||
|
|
||||||
// Count of queued names for current level and the next.
|
|
||||||
count := map[reflect.Type]int{}
|
|
||||||
nextCount := map[reflect.Type]int{}
|
|
||||||
|
|
||||||
// Types already visited at an earlier level.
|
|
||||||
visited := map[reflect.Type]bool{}
|
|
||||||
|
|
||||||
// Fields found.
|
|
||||||
var fields []field
|
|
||||||
|
|
||||||
for len(next) > 0 {
|
|
||||||
current, next = next, current[:0]
|
|
||||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
|
||||||
|
|
||||||
for _, f := range current {
|
|
||||||
if visited[f.typ] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
visited[f.typ] = true
|
|
||||||
|
|
||||||
// Scan f.typ for fields to include.
|
|
||||||
for i := 0; i < f.typ.NumField(); i++ {
|
|
||||||
sf := f.typ.Field(i)
|
|
||||||
if sf.PkgPath != "" && !sf.Anonymous { // unexported
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
opts := getOptions(sf.Tag)
|
|
||||||
if opts.skip {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
index := make([]int, len(f.index)+1)
|
|
||||||
copy(index, f.index)
|
|
||||||
index[len(f.index)] = i
|
|
||||||
|
|
||||||
ft := sf.Type
|
|
||||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
|
||||||
// Follow pointer.
|
|
||||||
ft = ft.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record found field and index sequence.
|
|
||||||
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
|
||||||
tagged := opts.name != ""
|
|
||||||
name := opts.name
|
|
||||||
if name == "" {
|
|
||||||
name = sf.Name
|
|
||||||
}
|
|
||||||
fields = append(fields, field{name, tagged, index, ft})
|
|
||||||
if count[f.typ] > 1 {
|
|
||||||
// If there were multiple instances, add a second,
|
|
||||||
// so that the annihilation code will see a duplicate.
|
|
||||||
// It only cares about the distinction between 1 or 2,
|
|
||||||
// so don't bother generating any more copies.
|
|
||||||
fields = append(fields, fields[len(fields)-1])
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record new anonymous struct to explore in next round.
|
|
||||||
nextCount[ft]++
|
|
||||||
if nextCount[ft] == 1 {
|
|
||||||
f := field{name: ft.Name(), index: index, typ: ft}
|
|
||||||
next = append(next, f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Sort(byName(fields))
|
|
||||||
|
|
||||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
|
||||||
// except that fields with TOML tags are promoted.
|
|
||||||
|
|
||||||
// The fields are sorted in primary order of name, secondary order
|
|
||||||
// of field index length. Loop over names; for each name, delete
|
|
||||||
// hidden fields by choosing the one dominant field that survives.
|
|
||||||
out := fields[:0]
|
|
||||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
|
||||||
// One iteration per name.
|
|
||||||
// Find the sequence of fields with the name of this first field.
|
|
||||||
fi := fields[i]
|
|
||||||
name := fi.name
|
|
||||||
for advance = 1; i+advance < len(fields); advance++ {
|
|
||||||
fj := fields[i+advance]
|
|
||||||
if fj.name != name {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if advance == 1 { // Only one field with this name
|
|
||||||
out = append(out, fi)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dominant, ok := dominantField(fields[i : i+advance])
|
|
||||||
if ok {
|
|
||||||
out = append(out, dominant)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fields = out
|
|
||||||
sort.Sort(byIndex(fields))
|
|
||||||
|
|
||||||
return fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// dominantField looks through the fields, all of which are known to
|
|
||||||
// have the same name, to find the single field that dominates the
|
|
||||||
// others using Go's embedding rules, modified by the presence of
|
|
||||||
// TOML tags. If there are multiple top-level fields, the boolean
|
|
||||||
// will be false: This condition is an error in Go and we skip all
|
|
||||||
// the fields.
|
|
||||||
func dominantField(fields []field) (field, bool) {
|
|
||||||
// The fields are sorted in increasing index-length order. The winner
|
|
||||||
// must therefore be one with the shortest index length. Drop all
|
|
||||||
// longer entries, which is easy: just truncate the slice.
|
|
||||||
length := len(fields[0].index)
|
|
||||||
tagged := -1 // Index of first tagged field.
|
|
||||||
for i, f := range fields {
|
|
||||||
if len(f.index) > length {
|
|
||||||
fields = fields[:i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if f.tag {
|
|
||||||
if tagged >= 0 {
|
|
||||||
// Multiple tagged fields at the same level: conflict.
|
|
||||||
// Return no field.
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
tagged = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tagged >= 0 {
|
|
||||||
return fields[tagged], true
|
|
||||||
}
|
|
||||||
// All remaining fields have the same length. If there's more than one,
|
|
||||||
// we have a conflict (two fields named "X" at the same level) and we
|
|
||||||
// return no field.
|
|
||||||
if len(fields) > 1 {
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
return fields[0], true
|
|
||||||
}
|
|
||||||
|
|
||||||
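// fieldCache caches the fields computed by typeFields, keyed by struct type.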
var fieldCache struct {
|
|
||||||
sync.RWMutex
|
|
||||||
m map[reflect.Type][]field
|
|
||||||
}
|
|
||||||
|
|
||||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
|
||||||
func cachedTypeFields(t reflect.Type) []field {
|
|
||||||
fieldCache.RLock()
|
|
||||||
f := fieldCache.m[t]
|
|
||||||
fieldCache.RUnlock()
|
|
||||||
if f != nil {
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute fields without lock.
|
|
||||||
// Might duplicate effort but won't hold other computations back.
|
|
||||||
f = typeFields(t)
|
|
||||||
if f == nil {
|
|
||||||
f = []field{}
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldCache.Lock()
|
|
||||||
if fieldCache.m == nil {
|
|
||||||
fieldCache.m = map[reflect.Type][]field{}
|
|
||||||
}
|
|
||||||
fieldCache.m[t] = f
|
|
||||||
fieldCache.Unlock()
|
|
||||||
return f
|
|
||||||
}
|
|
|
@ -1,21 +0,0 @@
|
||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2016 Shopify
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
|
@ -1,24 +0,0 @@
|
||||||
## logrus-bugsnag
|
|
||||||
|
|
||||||
[![Build Status](https://travis-ci.org/Shopify/logrus-bugsnag.svg)](https://travis-ci.org/Shopify/logrus-bugsnag)
|
|
||||||
|
|
||||||
logrus-bugsnag is a hook that allows [Logrus](https://github.com/sirupsen/logrus) to interface with [Bugsnag](https://bugsnag.com).
|
|
||||||
|
|
||||||
#### Usage
|
|
||||||
|
|
||||||
```go
import (
	"github.com/sirupsen/logrus"

	logrus_bugsnag "github.com/Shopify/logrus-bugsnag"
	bugsnag "github.com/bugsnag/bugsnag-go"
)

func init() {
	bugsnag.Configure(bugsnag.Configuration{
		APIKey: apiKey,
	})
	hook, err := logrus_bugsnag.NewBugsnagHook()
	if err != nil {
		logrus.Fatal(err)
	}
	logrus.StandardLogger().Hooks.Add(hook)
}
```
|
|
||||||
|
|
|
@ -1,81 +0,0 @@
|
||||||
package logrus_bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/bugsnag/bugsnag-go"
|
|
||||||
bugsnag_errors "github.com/bugsnag/bugsnag-go/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
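// bugsnagHook implements the logrus.Hook interface, forwarding matching log
// entries to Bugsnag.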
type bugsnagHook struct{}
|
|
||||||
|
|
||||||
// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before
|
|
||||||
// bugsnag.Configure. Bugsnag must be configured before the hook.
|
|
||||||
var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook")
|
|
||||||
|
|
||||||
// ErrBugsnagSendFailed indicates that the hook failed to submit an error to
|
|
||||||
// bugsnag. The error was successfully generated, but `bugsnag.Notify()`
|
|
||||||
// failed.
|
|
||||||
type ErrBugsnagSendFailed struct {
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e ErrBugsnagSendFailed) Error() string {
|
|
||||||
return "failed to send error to Bugsnag: " + e.err.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBugsnagHook initializes a logrus hook which sends exceptions to an
|
|
||||||
// exception-tracking service compatible with the Bugsnag API. Before using
|
|
||||||
// this hook, you must call bugsnag.Configure(). The returned object should be
|
|
||||||
// registered with a logger via `AddHook()`.
|
|
||||||
//
|
|
||||||
// Entries that trigger an Error, Fatal or Panic should now include an "error"
|
|
||||||
// field to send to Bugsnag.
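//
// For example, attaching an error with logrus.WithFields lets the hook
// pick it up:
//
//	logrus.WithFields(logrus.Fields{"error": err}).Error("request failed")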
|
|
||||||
func NewBugsnagHook() (*bugsnagHook, error) {
|
|
||||||
if bugsnag.Config.APIKey == "" {
|
|
||||||
return nil, ErrBugsnagUnconfigured
|
|
||||||
}
|
|
||||||
return &bugsnagHook{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// skipStackFrames skips logrus stack frames before logging to Bugsnag.
|
|
||||||
const skipStackFrames = 4
|
|
||||||
|
|
||||||
// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the
|
|
||||||
// "error" field (or the Message if the error isn't present) and sends it off.
|
|
||||||
func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
|
|
||||||
var notifyErr error
|
|
||||||
err, ok := entry.Data["error"].(error)
|
|
||||||
if ok {
|
|
||||||
notifyErr = err
|
|
||||||
} else {
|
|
||||||
notifyErr = errors.New(entry.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
metadata := bugsnag.MetaData{}
|
|
||||||
metadata["metadata"] = make(map[string]interface{})
|
|
||||||
for key, val := range entry.Data {
|
|
||||||
if key != "error" {
|
|
||||||
metadata["metadata"][key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
errWithStack := bugsnag_errors.New(notifyErr, skipStackFrames)
|
|
||||||
bugsnagErr := bugsnag.Notify(errWithStack, metadata)
|
|
||||||
if bugsnagErr != nil {
|
|
||||||
return ErrBugsnagSendFailed{bugsnagErr}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Levels enumerates the log levels on which the error should be forwarded to
|
|
||||||
// bugsnag: everything at or above the "Error" level.
|
|
||||||
func (hook *bugsnagHook) Levels() []logrus.Level {
|
|
||||||
return []logrus.Level{
|
|
||||||
logrus.ErrorLevel,
|
|
||||||
logrus.FatalLevel,
|
|
||||||
logrus.PanicLevel,
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,20 +0,0 @@
|
||||||
Copyright (c) 2014 Bugsnag
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
|
@ -1,522 +0,0 @@
|
||||||
Bugsnag Notifier for Golang
|
|
||||||
===========================
|
|
||||||
|
|
||||||
The Bugsnag Notifier for Golang gives you instant notification of panics, or
|
|
||||||
unexpected errors, in your golang app. Any unhandled panics will trigger a
|
|
||||||
notification to be sent to your Bugsnag project.
|
|
||||||
|
|
||||||
[Bugsnag](http://bugsnag.com) captures errors in real-time from your web,
|
|
||||||
mobile and desktop applications, helping you to understand and resolve them
|
|
||||||
as fast as possible. [Create a free account](http://bugsnag.com) to start
|
|
||||||
capturing exceptions from your applications.
|
|
||||||
|
|
||||||
## How to Install
|
|
||||||
|
|
||||||
1. Download the code
|
|
||||||
|
|
||||||
```shell
|
|
||||||
go get github.com/bugsnag/bugsnag-go
|
|
||||||
```
|
|
||||||
|
|
||||||
### Using with net/http apps
|
|
||||||
|
|
||||||
For a golang app based on [net/http](https://godoc.org/net/http), integrating
|
|
||||||
Bugsnag takes two steps. You should also use these instructions if you're using
|
|
||||||
the [gorilla toolkit](http://www.gorillatoolkit.org/), or the
|
|
||||||
[pat](https://github.com/bmizerany/pat/) muxer.
|
|
||||||
|
|
||||||
1. Configure bugsnag at the start of your `main()` function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import "github.com/bugsnag/bugsnag-go"
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
APIKey: "YOUR_API_KEY_HERE",
|
|
||||||
ReleaseStage: "production",
|
|
||||||
// more configuration options
|
|
||||||
})
|
|
||||||
|
|
||||||
// rest of your program.
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Wrap your server in a [bugsnag.Handler](https://godoc.org/github.com/bugsnag/bugsnag-go/#Handler)
|
|
||||||
|
|
||||||
```go
// a. If you're using the builtin http mux, you can just pass
// bugsnag.Handler(nil) to http.ListenAndServe
http.ListenAndServe(":8080", bugsnag.Handler(nil))

// b. If you're creating a server manually yourself, you can set
// its handler the same way
srv := http.Server{
	Handler: bugsnag.Handler(nil),
}

// c. If you're not using the builtin http mux, wrap your own handler
// (though make sure that it doesn't already catch panics)
http.ListenAndServe(":8080", bugsnag.Handler(handler))
```
|
|
||||||
|
|
||||||
### Using with Revel apps
|
|
||||||
|
|
||||||
There are two steps to get panic handling in [revel](https://revel.github.io) apps.
|
|
||||||
|
|
||||||
1. Add the `bugsnagrevel.Filter` immediately after the `revel.PanicFilter` in `app/init.go`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
|
|
||||||
import "github.com/bugsnag/bugsnag-go/revel"
|
|
||||||
|
|
||||||
revel.Filters = []revel.Filter{
|
|
||||||
revel.PanicFilter,
|
|
||||||
bugsnagrevel.Filter,
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Set bugsnag.apikey in the top section of `conf/app.conf`.
|
|
||||||
|
|
||||||
```
|
|
||||||
module.static=github.com/revel/revel/modules/static
|
|
||||||
|
|
||||||
bugsnag.apikey=YOUR_API_KEY_HERE
|
|
||||||
|
|
||||||
[dev]
|
|
||||||
```
|
|
||||||
|
|
||||||
### Using with Google App Engine
|
|
||||||
|
|
||||||
1. Configure bugsnag at the start of your `init()` function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import "github.com/bugsnag/bugsnag-go"
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
APIKey: "YOUR_API_KEY_HERE",
|
|
||||||
})
|
|
||||||
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Wrap *every* http.Handler or http.HandlerFunc with Bugsnag:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// a. If you're using HandlerFuncs
|
|
||||||
http.HandleFunc("/", bugsnag.HandlerFunc(
|
|
||||||
func (w http.ResponseWriter, r *http.Request) {
|
|
||||||
// ...
|
|
||||||
}))
|
|
||||||
|
|
||||||
// b. If you're using Handlers
|
|
||||||
http.Handle("/", bugsnag.Handler(myHttpHandler))
|
|
||||||
```
|
|
||||||
|
|
||||||
3. In order to use Bugsnag, you must provide the current
|
|
||||||
[`appengine.Context`](https://developers.google.com/appengine/docs/go/reference#Context), or
|
|
||||||
current `*http.Request` as rawData (This is done automatically for `bugsnag.Handler` and `bugsnag.HandlerFunc`).
|
|
||||||
The easiest way to do this is to create a new instance of the notifier.
|
|
||||||
|
|
||||||
```go
|
|
||||||
c := appengine.NewContext(r)
|
|
||||||
notifier := bugsnag.New(c)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
notifier.Notify(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func () {
|
|
||||||
defer notifier.Recover()
|
|
||||||
|
|
||||||
// ...
|
|
||||||
}()
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Notifying Bugsnag manually
|
|
||||||
|
|
||||||
Bugsnag will automatically handle any panics that crash your program and notify
|
|
||||||
you of them. If you've integrated with `revel` or `net/http`, then you'll also
|
|
||||||
be notified of any panics that happen while processing a request.
|
|
||||||
|
|
||||||
Sometimes however it's useful to manually notify Bugsnag of a problem. To do this,
|
|
||||||
call [`bugsnag.Notify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Notify)
|
|
||||||
|
|
||||||
```go
|
|
||||||
if err != nil {
|
|
||||||
bugsnag.Notify(err)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Manual panic handling
|
|
||||||
|
|
||||||
To avoid a panic in a goroutine from crashing your entire app, you can use
|
|
||||||
[`bugsnag.Recover()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover)
|
|
||||||
to stop a panic from unwinding the stack any further. When `Recover()` is hit,
|
|
||||||
it will send any current panic to Bugsnag and then stop panicking. This is
|
|
||||||
most useful at the start of a goroutine:
|
|
||||||
|
|
||||||
```go
|
|
||||||
go func() {
|
|
||||||
defer bugsnag.Recover()
|
|
||||||
|
|
||||||
// ...
|
|
||||||
}()
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively you can use
|
|
||||||
[`bugsnag.AutoNotify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover)
|
|
||||||
to notify bugsnag of a panic while letting the program continue to panic. This
|
|
||||||
is useful if you're using a Framework that already has some handling of panics
|
|
||||||
and you are retrofitting bugsnag support.
|
|
||||||
|
|
||||||
```go
|
|
||||||
defer bugsnag.AutoNotify()
|
|
||||||
```
|
|
||||||
|
|
||||||
## Sending Custom Data
|
|
||||||
|
|
||||||
Most functions in the Bugsnag API, including `bugsnag.Notify()`,
|
|
||||||
`bugsnag.Recover()`, `bugsnag.AutoNotify()`, and `bugsnag.Handler()` let you
|
|
||||||
attach data to the notifications that they send. To do this you pass in rawData,
|
|
||||||
which can be any of the supported types listed here. To add support for more
|
|
||||||
types of rawData see [OnBeforeNotify](#custom-data-with-onbeforenotify).
|
|
||||||
|
|
||||||
### Custom MetaData
|
|
||||||
|
|
||||||
Custom metaData appears as tabs on Bugsnag.com. You can set it by passing
|
|
||||||
a [`bugsnag.MetaData`](https://godoc.org/github.com/bugsnag/bugsnag-go/#MetaData)
|
|
||||||
object as rawData.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Notify(err,
|
|
||||||
bugsnag.MetaData{
|
|
||||||
"Account": {
|
|
||||||
"Name": Account.Name,
|
|
||||||
"Paying": Account.Plan.Premium,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Request data
|
|
||||||
|
|
||||||
Bugsnag can extract interesting data from
|
|
||||||
[`*http.Request`](https://godoc.org/net/http/#Request) objects, and
|
|
||||||
[`*revel.Controller`](https://godoc.org/github.com/revel/revel/#Controller)
|
|
||||||
objects. These are automatically passed in when handling panics, and you can
|
|
||||||
pass them yourself.
|
|
||||||
|
|
||||||
```go
|
|
||||||
func (w http.ResponseWriter, r *http.Request) {
|
|
||||||
bugsnag.Notify(err, r)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### User data
|
|
||||||
|
|
||||||
User data is searchable, and the `Id` powers the count of users affected. You
|
|
||||||
can set which user an error affects by passing a
|
|
||||||
[`bugsnag.User`](https://godoc.org/github.com/bugsnag/bugsnag-go/#User) object as
|
|
||||||
rawData.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Notify(err,
|
|
||||||
bugsnag.User{Id: "1234", Name: "Conrad", Email: "me@cirw.in"})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Error Class
|
|
||||||
|
|
||||||
Errors in your Bugsnag dashboard are grouped by their "error class" and by line number.
|
|
||||||
You can override the error class by passing a
|
|
||||||
[`bugsnag.ErrorClass`](https://godoc.org/github.com/bugsnag/bugsnag-go/#ErrorClass) object as
|
|
||||||
rawData.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Notify(err, bugsnag.ErrorClass{"I/O Timeout"})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Context
|
|
||||||
|
|
||||||
The context shows up prominently in the list view so that you can get an idea
|
|
||||||
of where a problem occurred. You can set it by passing a
|
|
||||||
[`bugsnag.Context`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Context)
|
|
||||||
object as rawData.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Notify(err, bugsnag.Context{"backgroundJob"})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Severity
|
|
||||||
|
|
||||||
Bugsnag supports three severities, `SeverityError`, `SeverityWarning`, and `SeverityInfo`.
|
|
||||||
You can set the severity of an error by passing one of these objects as rawData.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Notify(err, bugsnag.SeverityInfo)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
You must call `bugsnag.Configure()` at the start of your program to use Bugsnag. You pass it
|
|
||||||
a [`bugsnag.Configuration`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Configuration) object
|
|
||||||
containing any of the following values.
|
|
||||||
|
|
||||||
### APIKey
|
|
||||||
|
|
||||||
The Bugsnag API key can be found on your [Bugsnag dashboard](https://bugsnag.com) under "Settings".
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
APIKey: "YOUR_API_KEY_HERE",
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Endpoint
|
|
||||||
|
|
||||||
The Bugsnag endpoint defaults to `https://notify.bugsnag.com/`. If you're using Bugsnag enterprise,
|
|
||||||
you should set this to the endpoint of your local instance.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
Endpoint: "http://bugsnag.internal:49000/",
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### ReleaseStage
|
|
||||||
|
|
||||||
The ReleaseStage tracks where your app is deployed. You should set this to `production`, `staging`,
|
|
||||||
`development` or similar as appropriate.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
ReleaseStage: "development",
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### NotifyReleaseStages
|
|
||||||
|
|
||||||
The list of ReleaseStages to notify in. By default Bugsnag will notify you in all release stages, but
|
|
||||||
you can use this to silence development errors.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
NotifyReleaseStages: []string{"production", "staging"},
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### AppVersion
|
|
||||||
|
|
||||||
If you use a versioning scheme for deploys of your app, Bugsnag can use the `AppVersion` to only
|
|
||||||
re-open errors if they occur in a later version of the app.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
AppVersion: "1.2.3",
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Hostname
|
|
||||||
|
|
||||||
The hostname is used to track where exceptions are coming from in the Bugsnag dashboard. The
|
|
||||||
default value is obtained from `os.Hostname()` so you won't often need to change this.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
Hostname: "go1",
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### ProjectPackages
|
|
||||||
|
|
||||||
In order to determine where a crash happens Bugsnag needs to know which packages you consider to
|
|
||||||
be part of your app (as opposed to a library). By default this is set to `[]string{"main*"}`. Strings
|
|
||||||
are matched to package names using [`filepath.Match`](http://godoc.org/path/filepath#Match).
|
|
||||||
|
|
||||||
```go
bugsnag.Configure(bugsnag.Configuration{
	ProjectPackages: []string{"main", "github.com/domain/myapp/*"},
})
```
|
|
||||||
|
|
||||||
### ParamsFilters
|
|
||||||
|
|
||||||
Sometimes sensitive data is accidentally included in Bugsnag MetaData. You can remove it by
|
|
||||||
setting `ParamsFilters`. Any key in the `MetaData` that includes any string in the filters
|
|
||||||
will be redacted. The default is `[]string{"password", "secret"}`, which prevents fields like
|
|
||||||
`password`, `password_confirmation` and `secret_answer` from being sent.
|
|
||||||
|
|
||||||
```go
bugsnag.Configure(bugsnag.Configuration{
	ParamsFilters: []string{"password", "secret"},
})
```
|
|
||||||
|
|
||||||
### Logger
|
|
||||||
|
|
||||||
The Logger to write to in case of an error inside Bugsnag. This defaults to the global logger.
|
|
||||||
|
|
||||||
```go
bugsnag.Configure(bugsnag.Configuration{
	Logger: app.Logger,
})
```
|
|
||||||
|
|
||||||
### PanicHandler
|
|
||||||
|
|
||||||
The first time Bugsnag is configured, it wraps the running program in a panic
|
|
||||||
handler using [panicwrap](http://godoc.org/github.com/ConradIrwin/panicwrap). This
|
|
||||||
forks a sub-process which monitors unhandled panics. To prevent this, set
|
|
||||||
`PanicHandler` to `func() {}` the first time you call
|
|
||||||
`bugsnag.Configure`. This will prevent bugsnag from being able to notify you about
|
|
||||||
unhandled panics.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
PanicHandler: func() {},
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Synchronous
|
|
||||||
|
|
||||||
Bugsnag usually starts a new goroutine before sending notifications. This means
|
|
||||||
that notifications can be lost if you do a bugsnag.Notify and then immediately
|
|
||||||
os.Exit. To avoid this problem, set Bugsnag to Synchronous (or just `panic()`
|
|
||||||
instead ;).
|
|
||||||
|
|
||||||
```go
bugsnag.Configure(bugsnag.Configuration{
	Synchronous: true,
})
```
|
|
||||||
|
|
||||||
Or just for one error:
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Notify(err, bugsnag.Configuration{Synchronous: true})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Transport
|
|
||||||
|
|
||||||
The transport configures how Bugsnag makes http requests. By default we use
|
|
||||||
[`http.DefaultTransport`](http://godoc.org/net/http#RoundTripper) which handles
|
|
||||||
HTTP proxies automatically using the `$HTTP_PROXY` environment variable.
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Configure(bugsnag.Configuration{
|
|
||||||
Transport: http.DefaultTransport,
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
## Custom data with OnBeforeNotify
|
|
||||||
|
|
||||||
While it's nice that you can pass `MetaData` directly into `bugsnag.Notify`,
|
|
||||||
`bugsnag.AutoNotify`, and `bugsnag.Recover`, this can be a bit cumbersome and
|
|
||||||
inefficient — you're constructing the meta-data whether or not it will actually
|
|
||||||
be used. A better idea is to pass raw data in to these functions, and add an
|
|
||||||
`OnBeforeNotify` filter that converts them into `MetaData`.
|
|
||||||
|
|
||||||
For example, let's say our system processes jobs:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Job struct{
|
|
||||||
Retry bool
|
|
||||||
UserId string
|
|
||||||
UserEmail string
|
|
||||||
Name string
|
|
||||||
Params map[string]string
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
You can pass a job directly into `bugsnag.Notify`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Notify(err, job)
|
|
||||||
```
|
|
||||||
|
|
||||||
And then add a filter to extract information from that job and attach it to the
|
|
||||||
Bugsnag event:
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.OnBeforeNotify(
|
|
||||||
func(event *bugsnag.Event, config *bugsnag.Configuration) error {
|
|
||||||
|
|
||||||
// Search all the RawData for any *Job pointers that we're passed in
|
|
||||||
// to bugsnag.Notify() and friends.
|
|
||||||
for _, datum := range event.RawData {
|
|
||||||
if job, ok := datum.(*Job); ok {
|
|
||||||
// don't notify bugsnag about errors in retries
|
|
||||||
if job.Retry {
|
|
||||||
return fmt.Errorf("not notifying about retried jobs")
|
|
||||||
}
|
|
||||||
|
|
||||||
// add the job as a tab on Bugsnag.com
|
|
||||||
event.MetaData.AddStruct("Job", job)
|
|
||||||
|
|
||||||
// set the user correctly
|
|
||||||
event.User = &User{Id: job.UserId, Email: job.UserEmail}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// continue notifying as normal
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
## Advanced Usage
|
|
||||||
|
|
||||||
If you want to have multiple different configurations around in one program,
|
|
||||||
you can use `bugsnag.New()` to create multiple independent instances of
|
|
||||||
Bugsnag. You can use these without calling `bugsnag.Configure()`, but bear in
|
|
||||||
mind that until you call `bugsnag.Configure()` unhandled panics will not be
|
|
||||||
sent to bugsnag.
|
|
||||||
|
|
||||||
```go
|
|
||||||
notifier := bugsnag.New(bugsnag.Configuration{
|
|
||||||
APIKey: "YOUR_OTHER_API_KEY",
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
In fact any place that lets you pass in `rawData` also allows you to pass in
|
|
||||||
configuration. For example to send http errors to one bugsnag project, you
|
|
||||||
could do:
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.Handler(nil, bugsnag.Configuration{APIKey: "YOUR_OTHER_API_KEY"})
|
|
||||||
```
|
|
||||||
|
|
||||||
### GroupingHash
|
|
||||||
|
|
||||||
If you need to override Bugsnag's grouping algorithm, you can set the
|
|
||||||
`GroupingHash` in an `OnBeforeNotify`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
bugsnag.OnBeforeNotify(
|
|
||||||
func (event *bugsnag.Event, config *bugsnag.Configuration) error {
|
|
||||||
event.GroupingHash = calculateGroupingHash(event)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
### Skipping lines in stacktrace
|
|
||||||
|
|
||||||
If you have your own logging wrapper all of your errors will appear to
|
|
||||||
originate from inside it. You can avoid this problem by constructing
|
|
||||||
an error with a stacktrace manually, and then passing that to `bugsnag.Notify`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
"github.com/bugsnag/bugsnag-go"
|
|
||||||
"github.com/bugsnag/bugsnag-go/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
func LogError(e error) {
|
|
||||||
// 1 removes one line of stacktrace, so the caller of LogError
|
|
||||||
// will be at the top.
|
|
||||||
e = errors.New(e, 1)
|
|
||||||
bugsnag.Notify(e)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
|
@ -1,81 +0,0 @@
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"appengine"
|
|
||||||
"appengine/urlfetch"
|
|
||||||
"appengine/user"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func defaultPanicHandler() {}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
OnBeforeNotify(appengineMiddleware)
|
|
||||||
}
|
|
||||||
|
|
||||||
func appengineMiddleware(event *Event, config *Configuration) (err error) {
|
|
||||||
var c appengine.Context
|
|
||||||
|
|
||||||
for _, datum := range event.RawData {
|
|
||||||
if r, ok := datum.(*http.Request); ok {
|
|
||||||
c = appengine.NewContext(r)
|
|
||||||
break
|
|
||||||
} else if context, ok := datum.(appengine.Context); ok {
|
|
||||||
c = context
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c == nil {
|
|
||||||
return fmt.Errorf("No appengine context given")
|
|
||||||
}
|
|
||||||
|
|
||||||
// You can only use the builtin http library if you pay for appengine,
|
|
||||||
// so we use the appengine urlfetch service instead.
|
|
||||||
config.Transport = &urlfetch.Transport{
|
|
||||||
Context: c,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Anything written to stderr/stdout is discarded, so let's log to the request.
|
|
||||||
|
|
||||||
if configuredLogger, ok := config.Logger.(*log.Logger); ok {
|
|
||||||
config.Logger = log.New(appengineWriter{c}, configuredLogger.Prefix(), configuredLogger.Flags())
|
|
||||||
} else {
|
|
||||||
config.Logger = log.New(appengineWriter{c}, log.Prefix(), log.Flags())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the releaseStage appropriately
|
|
||||||
if config.ReleaseStage == "" {
|
|
||||||
if appengine.IsDevAppServer() {
|
|
||||||
config.ReleaseStage = "development"
|
|
||||||
} else {
|
|
||||||
config.ReleaseStage = "production"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if event.User == nil {
|
|
||||||
u := user.Current(c)
|
|
||||||
if u != nil {
|
|
||||||
event.User = &User{
|
|
||||||
Id: u.ID,
|
|
||||||
Email: u.Email,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert an appengine.Context into an io.Writer so we can create a log.Logger.
|
|
||||||
type appengineWriter struct {
|
|
||||||
appengine.Context
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c appengineWriter) Write(b []byte) (int, error) {
|
|
||||||
c.Warningf(string(b))
|
|
||||||
return len(b), nil
|
|
||||||
}
|
|
|
@ -1,131 +0,0 @@
|
||||||
package bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/bugsnag/bugsnag-go/errors"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
// Fixes a bug with SHA-384 intermediate certs on some platforms.
|
|
||||||
// - https://github.com/bugsnag/bugsnag-go/issues/9
|
|
||||||
_ "crypto/sha512"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The current version of bugsnag-go.
|
|
||||||
const VERSION = "1.0.3"
|
|
||||||
|
|
||||||
var once sync.Once
|
|
||||||
var middleware middlewareStack
|
|
||||||
|
|
||||||
// The configuration for the default bugsnag notifier.
|
|
||||||
var Config Configuration
|
|
||||||
|
|
||||||
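// defaultNotifier backs the package-level helpers such as Notify, AutoNotify
// and Recover.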
var defaultNotifier = Notifier{&Config, nil}
|
|
||||||
|
|
||||||
// Configure Bugsnag. The only required setting is the APIKey, which can be
|
|
||||||
// obtained by clicking on "Settings" in your Bugsnag dashboard. This function
|
|
||||||
// is also responsible for installing the global panic handler, so it should be
|
|
||||||
// called as early as possible in your initialization process.
|
|
||||||
func Configure(config Configuration) {
|
|
||||||
Config.update(&config)
|
|
||||||
once.Do(Config.PanicHandler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Notify sends an error to Bugsnag along with the current stack trace. The
|
|
||||||
// rawData is used to send extra information along with the error. For example
|
|
||||||
// you can pass the current http.Request to Bugsnag to see information about it
|
|
||||||
// in the dashboard, or set the severity of the notification.
|
|
||||||
func Notify(err error, rawData ...interface{}) error {
|
|
||||||
return defaultNotifier.Notify(errors.New(err, 1), rawData...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AutoNotify logs a panic on a goroutine and then repanics.
|
|
||||||
// It should only be used in places that have existing panic handlers further
|
|
||||||
// up the stack. See bugsnag.Recover(). The rawData is used to send extra
|
|
||||||
// information along with any panics that are handled this way.
|
|
||||||
// Usage: defer bugsnag.AutoNotify()
|
|
||||||
func AutoNotify(rawData ...interface{}) {
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityError)
|
|
||||||
defaultNotifier.Notify(errors.New(err, 2), rawData...)
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recover logs a panic on a goroutine and then recovers.
|
|
||||||
// The rawData is used to send extra information along with
|
|
||||||
// any panics that are handled this way
|
|
||||||
// Usage: defer bugsnag.Recover()
|
|
||||||
func Recover(rawData ...interface{}) {
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityWarning)
|
|
||||||
defaultNotifier.Notify(errors.New(err, 2), rawData...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnBeforeNotify adds a callback to be run before a notification is sent to
|
|
||||||
// Bugsnag. It can be used to modify the event or its MetaData. Changes made
|
|
||||||
// to the configuration are local to notifying about this event. To prevent the
|
|
||||||
// event from being sent to Bugsnag, return an error; this error will be
|
|
||||||
// returned from bugsnag.Notify() and the event will not be sent.
|
|
||||||
func OnBeforeNotify(callback func(event *Event, config *Configuration) error) {
|
|
||||||
middleware.OnBeforeNotify(callback)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler creates an http Handler that notifies Bugsnag any panics that
|
|
||||||
// happen. It then repanics so that the default http Server panic handler can
|
|
||||||
// handle the panic too. The rawData is used to send extra information along
|
|
||||||
// with any panics that are handled this way.
|
|
||||||
func Handler(h http.Handler, rawData ...interface{}) http.Handler {
|
|
||||||
notifier := New(rawData...)
|
|
||||||
if h == nil {
|
|
||||||
h = http.DefaultServeMux
|
|
||||||
}
|
|
||||||
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
defer notifier.AutoNotify(r)
|
|
||||||
h.ServeHTTP(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandlerFunc creates an http HandlerFunc that notifies Bugsnag about any
|
|
||||||
// panics that happen. It then repanics so that the default http Server panic
|
|
||||||
// handler can handle the panic too. The rawData is used to send extra
|
|
||||||
// information along with any panics that are handled this way. If you have
|
|
||||||
// already wrapped your http server using bugsnag.Handler() you don't also need
|
|
||||||
// to wrap each HandlerFunc.
|
|
||||||
func HandlerFunc(h http.HandlerFunc, rawData ...interface{}) http.HandlerFunc {
|
|
||||||
notifier := New(rawData...)
|
|
||||||
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
defer notifier.AutoNotify(r)
|
|
||||||
h(w, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// Set up builtin middleware
|
|
||||||
OnBeforeNotify(httpRequestMiddleware)
|
|
||||||
|
|
||||||
// Default configuration
|
|
||||||
Config.update(&Configuration{
|
|
||||||
APIKey: "",
|
|
||||||
Endpoint: "https://notify.bugsnag.com/",
|
|
||||||
Hostname: "",
|
|
||||||
AppVersion: "",
|
|
||||||
ReleaseStage: "",
|
|
||||||
ParamsFilters: []string{"password", "secret"},
|
|
||||||
// * for app-engine
|
|
||||||
ProjectPackages: []string{"main*"},
|
|
||||||
NotifyReleaseStages: nil,
|
|
||||||
Logger: log.New(os.Stdout, log.Prefix(), log.Flags()),
|
|
||||||
PanicHandler: defaultPanicHandler,
|
|
||||||
Transport: http.DefaultTransport,
|
|
||||||
})
|
|
||||||
|
|
||||||
hostname, err := os.Hostname()
|
|
||||||
if err == nil {
|
|
||||||
Config.Hostname = hostname
|
|
||||||
}
|
|
||||||
}
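As a point of reference, a minimal sketch of how the exported pieces above are typically combined in an application; the API key, route, and port below are placeholder values, and Configure is the setup call shown in the package documentation below.

```go
package main

import (
	"net/http"

	"github.com/bugsnag/bugsnag-go"
)

func main() {
	// Placeholder API key; real keys come from the Bugsnag dashboard.
	bugsnag.Configure(bugsnag.Configuration{APIKey: "0123456789abcdef0123456789abcdef"})

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Handler(nil) wraps http.DefaultServeMux; panics in handlers are
	// notified to Bugsnag and then re-raised for the server to handle.
	http.ListenAndServe(":8080", bugsnag.Handler(nil))
}
```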
@ -1,161 +0,0 @@
|
||||||
package bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Configuration sets up and customizes communication with the Bugsnag API.
|
|
||||||
type Configuration struct {
|
|
||||||
// Your Bugsnag API key, e.g. "c9d60ae4c7e70c4b6c4ebd3e8056d2b8". You can
|
|
||||||
// find this by clicking Settings on https://bugsnag.com/.
|
|
||||||
APIKey string
|
|
||||||
// The Endpoint to notify about crashes. This defaults to
|
|
||||||
// "https://notify.bugsnag.com/", if you're using Bugsnag Enterprise then
|
|
||||||
// set it to your internal Bugsnag endpoint.
|
|
||||||
Endpoint string
|
|
||||||
|
|
||||||
// The current release stage. This defaults to "production" and is used to
|
|
||||||
// filter errors in the Bugsnag dashboard.
|
|
||||||
ReleaseStage string
|
|
||||||
// The currently running version of the app. This is used to filter errors
|
|
||||||
// in the Bugsnag dasboard. If you set this then Bugsnag will only re-open
|
|
||||||
// resolved errors if they happen in different app versions.
|
|
||||||
AppVersion string
|
|
||||||
// The hostname of the current server. This defaults to the return value of
|
|
||||||
// os.Hostname() and is graphed in the Bugsnag dashboard.
|
|
||||||
Hostname string
|
|
||||||
|
|
||||||
// The Release stages to notify in. If you set this then bugsnag-go will
|
|
||||||
// only send notifications to Bugsnag if the ReleaseStage is listed here.
|
|
||||||
NotifyReleaseStages []string
|
|
||||||
|
|
||||||
// packages that are part of your app. Bugsnag uses this to determine how
|
|
||||||
// to group errors and how to display them on your dashboard. You should
|
|
||||||
// include any packages that are part of your app, and exclude libraries
|
|
||||||
// and helpers. You can list wildcards here, and they'll be expanded using
|
|
||||||
// filepath.Glob. The default value is []string{"main*"}
|
|
||||||
ProjectPackages []string
|
|
||||||
|
|
||||||
// Any meta-data that matches these filters will be marked as [REDACTED]
|
|
||||||
// before sending a Notification to Bugsnag. It defaults to
|
|
||||||
// []string{"password", "secret"} so that request parameters like password,
|
|
||||||
// password_confirmation and auth_secret will not be sent to Bugsnag.
|
|
||||||
ParamsFilters []string
|
|
||||||
|
|
||||||
// The PanicHandler is used by Bugsnag to catch unhandled panics in your
|
|
||||||
// application. The default panicHandler uses mitchellh's panicwrap library,
|
|
||||||
// and you can disable this feature by passing an empty: func() {}
|
|
||||||
PanicHandler func()
|
|
||||||
|
|
||||||
// The logger that Bugsnag should log to. Uses the same defaults as go's
|
|
||||||
// builtin logging package. bugsnag-go logs whenever it notifies Bugsnag
|
|
||||||
// of an error, and when any error occurs inside the library itself.
|
|
||||||
Logger interface {
|
|
||||||
Printf(format string, v ...interface{}) // limited to the functions used
|
|
||||||
}
|
|
||||||
// The http Transport to use, defaults to the default http Transport. This
|
|
||||||
// can be configured if you are in an environment like Google App Engine
|
|
||||||
// that has stringent conditions on making http requests.
|
|
||||||
Transport http.RoundTripper
|
|
||||||
// Whether bugsnag should notify synchronously. This defaults to false which
|
|
||||||
// causes bugsnag-go to spawn a new goroutine for each notification.
|
|
||||||
Synchronous bool
|
|
||||||
// TODO: remember to update the update() function when modifying this struct
|
|
||||||
}
|
|
||||||
|
|
||||||
func (config *Configuration) update(other *Configuration) *Configuration {
|
|
||||||
if other.APIKey != "" {
|
|
||||||
config.APIKey = other.APIKey
|
|
||||||
}
|
|
||||||
if other.Endpoint != "" {
|
|
||||||
config.Endpoint = other.Endpoint
|
|
||||||
}
|
|
||||||
if other.Hostname != "" {
|
|
||||||
config.Hostname = other.Hostname
|
|
||||||
}
|
|
||||||
if other.AppVersion != "" {
|
|
||||||
config.AppVersion = other.AppVersion
|
|
||||||
}
|
|
||||||
if other.ReleaseStage != "" {
|
|
||||||
config.ReleaseStage = other.ReleaseStage
|
|
||||||
}
|
|
||||||
if other.ParamsFilters != nil {
|
|
||||||
config.ParamsFilters = other.ParamsFilters
|
|
||||||
}
|
|
||||||
if other.ProjectPackages != nil {
|
|
||||||
config.ProjectPackages = other.ProjectPackages
|
|
||||||
}
|
|
||||||
if other.Logger != nil {
|
|
||||||
config.Logger = other.Logger
|
|
||||||
}
|
|
||||||
if other.NotifyReleaseStages != nil {
|
|
||||||
config.NotifyReleaseStages = other.NotifyReleaseStages
|
|
||||||
}
|
|
||||||
if other.PanicHandler != nil {
|
|
||||||
config.PanicHandler = other.PanicHandler
|
|
||||||
}
|
|
||||||
if other.Transport != nil {
|
|
||||||
config.Transport = other.Transport
|
|
||||||
}
|
|
||||||
if other.Synchronous {
|
|
||||||
config.Synchronous = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return config
|
|
||||||
}
|
|
||||||
|
|
||||||
func (config *Configuration) merge(other *Configuration) *Configuration {
|
|
||||||
return config.clone().update(other)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (config *Configuration) clone() *Configuration {
|
|
||||||
clone := *config
|
|
||||||
return &clone
|
|
||||||
}
|
|
||||||
|
|
||||||
func (config *Configuration) isProjectPackage(pkg string) bool {
|
|
||||||
for _, p := range config.ProjectPackages {
|
|
||||||
if match, _ := filepath.Match(p, pkg); match {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (config *Configuration) stripProjectPackages(file string) string {
|
|
||||||
for _, p := range config.ProjectPackages {
|
|
||||||
if len(p) > 2 && p[len(p)-2] == '/' && p[len(p)-1] == '*' {
|
|
||||||
p = p[:len(p)-1]
|
|
||||||
} else {
|
|
||||||
p = p + "/"
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(file, p) {
|
|
||||||
return strings.TrimPrefix(file, p)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return file
|
|
||||||
}
|
|
||||||
|
|
||||||
func (config *Configuration) log(fmt string, args ...interface{}) {
|
|
||||||
if config != nil && config.Logger != nil {
|
|
||||||
config.Logger.Printf(fmt, args...)
|
|
||||||
} else {
|
|
||||||
log.Printf(fmt, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (config *Configuration) notifyInReleaseStage() bool {
|
|
||||||
if config.NotifyReleaseStages == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
for _, r := range config.NotifyReleaseStages {
|
|
||||||
if r == config.ReleaseStage {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
@ -1,69 +0,0 @@
/*
Package bugsnag captures errors in real-time and reports them to Bugsnag (http://bugsnag.com).

Using bugsnag-go is a three-step process.

1. As early as possible in your program configure the notifier with your APIKey. This sets up
handling of panics that would otherwise crash your app.

	func init() {
		bugsnag.Configure(bugsnag.Configuration{
			APIKey: "YOUR_API_KEY_HERE",
		})
	}

2. Add bugsnag to places that already catch panics. For example you should add it to the HTTP server
when you call ListenAndServe:

	http.ListenAndServe(":8080", bugsnag.Handler(nil))

If that's not possible, for example because you're using Google App Engine, you can also wrap each
HTTP handler manually:

	http.HandleFunc("/", bugsnag.HandlerFunc(func (w http.ResponseWriter, r *http.Request) {
		...
	}))

3. To notify Bugsnag of an error that is not a panic, pass it to bugsnag.Notify. This will also
log the error message using the configured Logger.

	if err != nil {
		bugsnag.Notify(err)
	}

For detailed integration instructions see https://bugsnag.com/docs/notifiers/go.

Configuration

The only required configuration is the Bugsnag API key which can be obtained by clicking "Settings"
on the top of https://bugsnag.com/ after signing up. We also recommend you set the ReleaseStage
and AppVersion if these make sense for your deployment workflow.

RawData

If you need to attach extra data to Bugsnag notifications you can do that using
the rawData mechanism. Most of the functions that send errors to Bugsnag allow
you to pass in any number of interface{} values as rawData. The rawData can
consist of the Severity, Context, User or MetaData types listed below, and
there is also builtin support for *http.Requests.

	bugsnag.Notify(err, bugsnag.SeverityError)

If you want to add custom tabs to your bugsnag dashboard you can pass any value in as rawData,
and then process it into the event's metadata using a bugsnag.OnBeforeNotify() hook.

	bugsnag.Notify(err, account)

	bugsnag.OnBeforeNotify(func (e *bugsnag.Event, c *bugsnag.Configuration) error {
		for _, datum := range e.RawData {
			if account, ok := datum.(Account); ok {
				e.MetaData.Add("account", "name", account.Name)
				e.MetaData.Add("account", "url", account.URL)
			}
		}
		return nil
	})

If necessary you can pass Configuration in as rawData, or modify the Configuration object passed
into OnBeforeNotify hooks. Configuration passed in this way only affects the current notification.
*/
package bugsnag
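Tying the RawData section above to the types defined later in this diff, a hedged sketch of a single Notify call that combines several of them; every value below is illustrative.

```go
package main

import (
	"errors"

	"github.com/bugsnag/bugsnag-go"
)

func main() {
	err := errors.New("payment declined") // illustrative error

	// Severity, Context, User and MetaData all travel as rawData.
	bugsnag.Notify(err,
		bugsnag.SeverityInfo,
		bugsnag.Context{String: "billing/charge"},
		bugsnag.User{Id: "1234", Email: "user@example.com"},
		bugsnag.MetaData{
			"Account": {
				"Plan": "enterprise",
			},
		},
	)
}
```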
@ -1,6 +0,0 @@
Adds stacktraces to errors in golang.

This was made to help build the Bugsnag notifier but can be used standalone if
you like to have stacktraces on errors.

See [Godoc](https://godoc.org/github.com/bugsnag/bugsnag-go/errors) for the API docs.
@ -1,90 +0,0 @@
|
||||||
// Package errors provides errors that have stack-traces.
|
|
||||||
package errors
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The maximum number of stackframes on any error.
|
|
||||||
var MaxStackDepth = 50
|
|
||||||
|
|
||||||
// Error is an error with an attached stacktrace. It can be used
|
|
||||||
// wherever the builtin error interface is expected.
|
|
||||||
type Error struct {
|
|
||||||
Err error
|
|
||||||
stack []uintptr
|
|
||||||
frames []StackFrame
|
|
||||||
}
|
|
||||||
|
|
||||||
// New makes an Error from the given value. If that value is already an
|
|
||||||
// error then it will be used directly, if not, it will be passed to
|
|
||||||
// fmt.Errorf("%v"). The skip parameter indicates how far up the stack
|
|
||||||
// to start the stacktrace. 0 is from the current call, 1 from its caller, etc.
|
|
||||||
func New(e interface{}, skip int) *Error {
|
|
||||||
var err error
|
|
||||||
|
|
||||||
switch e := e.(type) {
|
|
||||||
case *Error:
|
|
||||||
return e
|
|
||||||
case error:
|
|
||||||
err = e
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("%v", e)
|
|
||||||
}
|
|
||||||
|
|
||||||
stack := make([]uintptr, MaxStackDepth)
|
|
||||||
length := runtime.Callers(2+skip, stack[:])
|
|
||||||
return &Error{
|
|
||||||
Err: err,
|
|
||||||
stack: stack[:length],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Errorf creates a new error with the given message. You can use it
|
|
||||||
// as a drop-in replacement for fmt.Errorf() to provide descriptive
|
|
||||||
// errors in return values.
|
|
||||||
func Errorf(format string, a ...interface{}) *Error {
|
|
||||||
return New(fmt.Errorf(format, a...), 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the underlying error's message.
|
|
||||||
func (err *Error) Error() string {
|
|
||||||
return err.Err.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stack returns the callstack formatted the same way that go does
|
|
||||||
// in runtime/debug.Stack()
|
|
||||||
func (err *Error) Stack() []byte {
|
|
||||||
buf := bytes.Buffer{}
|
|
||||||
|
|
||||||
for _, frame := range err.StackFrames() {
|
|
||||||
buf.WriteString(frame.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
// StackFrames returns an array of frames containing information about the
|
|
||||||
// stack.
|
|
||||||
func (err *Error) StackFrames() []StackFrame {
|
|
||||||
if err.frames == nil {
|
|
||||||
err.frames = make([]StackFrame, len(err.stack))
|
|
||||||
|
|
||||||
for i, pc := range err.stack {
|
|
||||||
err.frames[i] = NewStackFrame(pc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return err.frames
|
|
||||||
}
|
|
||||||
|
|
||||||
// TypeName returns the type this error. e.g. *errors.stringError.
|
|
||||||
func (err *Error) TypeName() string {
|
|
||||||
if _, ok := err.Err.(uncaughtPanic); ok {
|
|
||||||
return "panic"
|
|
||||||
}
|
|
||||||
return reflect.TypeOf(err.Err).String()
|
|
||||||
}
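A brief sketch of using this errors package on its own, as the README above suggests; the failing function is purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/bugsnag/bugsnag-go/errors"
)

func connect() error {
	// Errorf records the call stack at the point the error is created.
	return errors.Errorf("connection refused")
}

func main() {
	err := connect()
	if e, ok := err.(*errors.Error); ok {
		// Error() is the message, Stack() the go-style stack trace.
		fmt.Printf("%s\n%s", e.Error(), e.Stack())
	}
}
```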
|
|
|
@ -1,127 +0,0 @@
|
||||||
package errors
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type uncaughtPanic struct{ message string }
|
|
||||||
|
|
||||||
func (p uncaughtPanic) Error() string {
|
|
||||||
return p.message
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParsePanic allows you to get an error object from the output of a go program
|
|
||||||
// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap.
|
|
||||||
func ParsePanic(text string) (*Error, error) {
|
|
||||||
lines := strings.Split(text, "\n")
|
|
||||||
|
|
||||||
state := "start"
|
|
||||||
|
|
||||||
var message string
|
|
||||||
var stack []StackFrame
|
|
||||||
|
|
||||||
for i := 0; i < len(lines); i++ {
|
|
||||||
line := lines[i]
|
|
||||||
|
|
||||||
if state == "start" {
|
|
||||||
if strings.HasPrefix(line, "panic: ") {
|
|
||||||
message = strings.TrimPrefix(line, "panic: ")
|
|
||||||
state = "seek"
|
|
||||||
} else {
|
|
||||||
return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line)
|
|
||||||
}
|
|
||||||
|
|
||||||
} else if state == "seek" {
|
|
||||||
if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") {
|
|
||||||
state = "parsing"
|
|
||||||
}
|
|
||||||
|
|
||||||
} else if state == "parsing" {
|
|
||||||
if line == "" {
|
|
||||||
state = "done"
|
|
||||||
break
|
|
||||||
}
|
|
||||||
createdBy := false
|
|
||||||
if strings.HasPrefix(line, "created by ") {
|
|
||||||
line = strings.TrimPrefix(line, "created by ")
|
|
||||||
createdBy = true
|
|
||||||
}
|
|
||||||
|
|
||||||
i++
|
|
||||||
|
|
||||||
if i >= len(lines) {
|
|
||||||
return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line)
|
|
||||||
}
|
|
||||||
|
|
||||||
frame, err := parsePanicFrame(line, lines[i], createdBy)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
stack = append(stack, *frame)
|
|
||||||
if createdBy {
|
|
||||||
state = "done"
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if state == "done" || state == "parsing" {
|
|
||||||
return &Error{Err: uncaughtPanic{message}, frames: stack}, nil
|
|
||||||
}
|
|
||||||
return nil, Errorf("could not parse panic: %v", text)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The lines we're passing look like this:
|
|
||||||
//
|
|
||||||
// main.(*foo).destruct(0xc208067e98)
|
|
||||||
// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151
|
|
||||||
func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) {
|
|
||||||
idx := strings.LastIndex(name, "(")
|
|
||||||
if idx == -1 && !createdBy {
|
|
||||||
return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name)
|
|
||||||
}
|
|
||||||
if idx != -1 {
|
|
||||||
name = name[:idx]
|
|
||||||
}
|
|
||||||
pkg := ""
|
|
||||||
|
|
||||||
if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
|
|
||||||
pkg += name[:lastslash] + "/"
|
|
||||||
name = name[lastslash+1:]
|
|
||||||
}
|
|
||||||
if period := strings.Index(name, "."); period >= 0 {
|
|
||||||
pkg += name[:period]
|
|
||||||
name = name[period+1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
name = strings.Replace(name, "·", ".", -1)
|
|
||||||
|
|
||||||
if !strings.HasPrefix(line, "\t") {
|
|
||||||
return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line)
|
|
||||||
}
|
|
||||||
|
|
||||||
idx = strings.LastIndex(line, ":")
|
|
||||||
if idx == -1 {
|
|
||||||
return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line)
|
|
||||||
}
|
|
||||||
file := line[1:idx]
|
|
||||||
|
|
||||||
number := line[idx+1:]
|
|
||||||
if idx = strings.Index(number, " +"); idx > -1 {
|
|
||||||
number = number[:idx]
|
|
||||||
}
|
|
||||||
|
|
||||||
lno, err := strconv.ParseInt(number, 10, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &StackFrame{
|
|
||||||
File: file,
|
|
||||||
LineNumber: int(lno),
|
|
||||||
Package: pkg,
|
|
||||||
Name: name,
|
|
||||||
}, nil
|
|
||||||
}
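To make the parser above concrete, a small sketch feeding it a hand-written crash log in the shape it walks through; the message, file path, and goroutine dump are fabricated for illustration.

```go
package main

import (
	"fmt"

	"github.com/bugsnag/bugsnag-go/errors"
)

func main() {
	// "panic: <msg>", then a "goroutine ... [running]:" line,
	// then function-name / tab-indented location pairs.
	out := "panic: runtime error: index out of range\n" +
		"\n" +
		"goroutine 1 [running]:\n" +
		"main.main()\n" +
		"\t/app/main.go:10 +0x1a\n" +
		"\n"

	if parsed, err := errors.ParsePanic(out); err == nil {
		// Prints: panic runtime error: index out of range
		fmt.Println(parsed.TypeName(), parsed.Error())
	}
}
```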
|
|
|
@ -1,97 +0,0 @@
|
||||||
package errors
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A StackFrame contains all necessary information about to generate a line
|
|
||||||
// in a callstack.
|
|
||||||
type StackFrame struct {
|
|
||||||
File string
|
|
||||||
LineNumber int
|
|
||||||
Name string
|
|
||||||
Package string
|
|
||||||
ProgramCounter uintptr
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewStackFrame popoulates a stack frame object from the program counter.
|
|
||||||
func NewStackFrame(pc uintptr) (frame StackFrame) {
|
|
||||||
|
|
||||||
frame = StackFrame{ProgramCounter: pc}
|
|
||||||
if frame.Func() == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
frame.Package, frame.Name = packageAndName(frame.Func())
|
|
||||||
|
|
||||||
// pc -1 because the program counters we use are usually return addresses,
|
|
||||||
// and we want to show the line that corresponds to the function call
|
|
||||||
frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1)
|
|
||||||
return
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Func returns the function that this stackframe corresponds to
|
|
||||||
func (frame *StackFrame) Func() *runtime.Func {
|
|
||||||
if frame.ProgramCounter == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return runtime.FuncForPC(frame.ProgramCounter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the stackframe formatted in the same way as go does
|
|
||||||
// in runtime/debug.Stack()
|
|
||||||
func (frame *StackFrame) String() string {
|
|
||||||
str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter)
|
|
||||||
|
|
||||||
source, err := frame.SourceLine()
|
|
||||||
if err != nil {
|
|
||||||
return str
|
|
||||||
}
|
|
||||||
|
|
||||||
return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SourceLine gets the line of code (from File and Line) of the original source if possible
|
|
||||||
func (frame *StackFrame) SourceLine() (string, error) {
|
|
||||||
data, err := ioutil.ReadFile(frame.File)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
lines := bytes.Split(data, []byte{'\n'})
|
|
||||||
if frame.LineNumber <= 0 || frame.LineNumber >= len(lines) {
|
|
||||||
return "???", nil
|
|
||||||
}
|
|
||||||
// -1 because line-numbers are 1 based, but our array is 0 based
|
|
||||||
return string(bytes.Trim(lines[frame.LineNumber-1], " \t")), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func packageAndName(fn *runtime.Func) (string, string) {
|
|
||||||
name := fn.Name()
|
|
||||||
pkg := ""
|
|
||||||
|
|
||||||
// The name includes the path name to the package, which is unnecessary
|
|
||||||
// since the file name is already included. Plus, it has center dots.
|
|
||||||
// That is, we see
|
|
||||||
// runtime/debug.*T·ptrmethod
|
|
||||||
// and want
|
|
||||||
// *T.ptrmethod
|
|
||||||
// Since the package path might contains dots (e.g. code.google.com/...),
|
|
||||||
// we first remove the path prefix if there is one.
|
|
||||||
if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
|
|
||||||
pkg += name[:lastslash] + "/"
|
|
||||||
name = name[lastslash+1:]
|
|
||||||
}
|
|
||||||
if period := strings.Index(name, "."); period >= 0 {
|
|
||||||
pkg += name[:period]
|
|
||||||
name = name[period+1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
name = strings.Replace(name, "·", ".", -1)
|
|
||||||
return pkg, name
|
|
||||||
}
|
|
|
@ -1,143 +0,0 @@
|
||||||
package bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/bugsnag/bugsnag-go/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Context is the context of the error in Bugsnag.
|
|
||||||
// This can be passed to Notify, Recover or AutoNotify as rawData.
|
|
||||||
type Context struct {
|
|
||||||
String string
|
|
||||||
}
|
|
||||||
|
|
||||||
// User represents the searchable user-data on Bugsnag. The Id is also used
|
|
||||||
// to determine the number of users affected by a bug. This can be
|
|
||||||
// passed to Notify, Recover or AutoNotify as rawData.
|
|
||||||
type User struct {
|
|
||||||
Id string `json:"id,omitempty"`
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
Email string `json:"email,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorClass overrides the error class in Bugsnag.
|
|
||||||
// This struct enables you to group errors as you like.
|
|
||||||
type ErrorClass struct {
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sets the severity of the error on Bugsnag. These values can be
|
|
||||||
// passed to Notify, Recover or AutoNotify as rawData.
|
|
||||||
var (
|
|
||||||
SeverityError = severity{"error"}
|
|
||||||
SeverityWarning = severity{"warning"}
|
|
||||||
SeverityInfo = severity{"info"}
|
|
||||||
)
|
|
||||||
|
|
||||||
// The severity tag type, private so that people can only use Error,Warning,Info
|
|
||||||
type severity struct {
|
|
||||||
String string
|
|
||||||
}
|
|
||||||
|
|
||||||
// The form of stacktrace that Bugsnag expects
|
|
||||||
type stackFrame struct {
|
|
||||||
Method string `json:"method"`
|
|
||||||
File string `json:"file"`
|
|
||||||
LineNumber int `json:"lineNumber"`
|
|
||||||
InProject bool `json:"inProject,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Event represents a payload of data that gets sent to Bugsnag.
|
|
||||||
// This is passed to each OnBeforeNotify hook.
|
|
||||||
type Event struct {
|
|
||||||
|
|
||||||
// The original error that caused this event, not sent to Bugsnag.
|
|
||||||
Error *errors.Error
|
|
||||||
|
|
||||||
// The rawData affecting this error, not sent to Bugsnag.
|
|
||||||
RawData []interface{}
|
|
||||||
|
|
||||||
// The error class to be sent to Bugsnag. This defaults to the type name of the Error, for
|
|
||||||
// example *error.String
|
|
||||||
ErrorClass string
|
|
||||||
// The error message to be sent to Bugsnag. This defaults to the return value of Error.Error()
|
|
||||||
Message string
|
|
||||||
// The stacktrrace of the error to be sent to Bugsnag.
|
|
||||||
Stacktrace []stackFrame
|
|
||||||
|
|
||||||
// The context to be sent to Bugsnag. This should be set to the part of the app that was running,
|
|
||||||
// e.g. for http requests, set it to the path.
|
|
||||||
Context string
|
|
||||||
// The severity of the error. Can be SeverityError, SeverityWarning or SeverityInfo.
|
|
||||||
Severity severity
|
|
||||||
// The grouping hash is used to override Bugsnag's grouping. Set this if you'd like all errors with
|
|
||||||
// the same grouping hash to group together in the dashboard.
|
|
||||||
GroupingHash string
|
|
||||||
|
|
||||||
// User data to send to Bugsnag. This is searchable on the dashboard.
|
|
||||||
User *User
|
|
||||||
// Other MetaData to send to Bugsnag. Appears as a set of tabbed tables in the dashboard.
|
|
||||||
MetaData MetaData
|
|
||||||
}
|
|
||||||
|
|
||||||
func newEvent(err *errors.Error, rawData []interface{}, notifier *Notifier) (*Event, *Configuration) {
|
|
||||||
|
|
||||||
config := notifier.Config
|
|
||||||
event := &Event{
|
|
||||||
Error: err,
|
|
||||||
RawData: append(notifier.RawData, rawData...),
|
|
||||||
|
|
||||||
ErrorClass: err.TypeName(),
|
|
||||||
Message: err.Error(),
|
|
||||||
Stacktrace: make([]stackFrame, len(err.StackFrames())),
|
|
||||||
|
|
||||||
Severity: SeverityWarning,
|
|
||||||
|
|
||||||
MetaData: make(MetaData),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, datum := range event.RawData {
|
|
||||||
switch datum := datum.(type) {
|
|
||||||
case severity:
|
|
||||||
event.Severity = datum
|
|
||||||
|
|
||||||
case Context:
|
|
||||||
event.Context = datum.String
|
|
||||||
|
|
||||||
case Configuration:
|
|
||||||
config = config.merge(&datum)
|
|
||||||
|
|
||||||
case MetaData:
|
|
||||||
event.MetaData.Update(datum)
|
|
||||||
|
|
||||||
case User:
|
|
||||||
event.User = &datum
|
|
||||||
|
|
||||||
case ErrorClass:
|
|
||||||
event.ErrorClass = datum.Name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, frame := range err.StackFrames() {
|
|
||||||
file := frame.File
|
|
||||||
inProject := config.isProjectPackage(frame.Package)
|
|
||||||
|
|
||||||
// remove $GOROOT and $GOHOME from other frames
|
|
||||||
if idx := strings.Index(file, frame.Package); idx > -1 {
|
|
||||||
file = file[idx:]
|
|
||||||
}
|
|
||||||
if inProject {
|
|
||||||
file = config.stripProjectPackages(file)
|
|
||||||
}
|
|
||||||
|
|
||||||
event.Stacktrace[i] = stackFrame{
|
|
||||||
Method: frame.Name,
|
|
||||||
File: file,
|
|
||||||
LineNumber: frame.LineNumber,
|
|
||||||
InProject: inProject,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return event, config
|
|
||||||
}
@ -1,43 +0,0 @@
// The code is stripped from:
// http://golang.org/src/pkg/encoding/json/tags.go?m=text

package bugsnag

import (
	"strings"
)

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	if idx := strings.Index(tag, ","); idx != -1 {
		return tag[:idx], tagOptions(tag[idx+1:])
	}
	return tag, tagOptions("")
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	s := string(o)
	for s != "" {
		var next string
		i := strings.Index(s, ",")
		if i >= 0 {
			s, next = s[:i], s[i+1:]
		}
		if s == optionName {
			return true
		}
		s = next
	}
	return false
}
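Since both helpers are unexported, a quick illustration of their behaviour written as it would appear in a test file inside the same package; the tag strings are illustrative.

```go
package bugsnag

import "testing"

func TestParseTagSketch(t *testing.T) {
	// A tag with options: the name and the option list are split apart.
	name, opts := parseTag("email,omitempty")
	if name != "email" || !opts.Contains("omitempty") {
		t.Fatalf("unexpected parse: %q, %q", name, opts)
	}

	// A bare tag: the whole string is the name and there are no options.
	name, opts = parseTag("plain")
	if name != "plain" || opts.Contains("omitempty") {
		t.Fatalf("expected bare name with no options, got %q, %q", name, opts)
	}
}
```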
@ -1,189 +0,0 @@
|
||||||
package bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MetaData is added to the Bugsnag dashboard in tabs. Each tab is
|
|
||||||
// a map of strings -> values. You can pass MetaData to Notify, Recover
|
|
||||||
// and AutoNotify as rawData.
|
|
||||||
type MetaData map[string]map[string]interface{}
|
|
||||||
|
|
||||||
// Update the meta-data with more information. Tabs are merged together such
|
|
||||||
// that unique keys from both sides are preserved, and duplicate keys end up
|
|
||||||
// with the provided values.
|
|
||||||
func (meta MetaData) Update(other MetaData) {
|
|
||||||
for name, tab := range other {
|
|
||||||
|
|
||||||
if meta[name] == nil {
|
|
||||||
meta[name] = make(map[string]interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
for key, value := range tab {
|
|
||||||
meta[name][key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add creates a tab of Bugsnag meta-data.
|
|
||||||
// If the tab doesn't yet exist it will be created.
|
|
||||||
// If the key already exists, it will be overwritten.
|
|
||||||
func (meta MetaData) Add(tab string, key string, value interface{}) {
|
|
||||||
if meta[tab] == nil {
|
|
||||||
meta[tab] = make(map[string]interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
meta[tab][key] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddStruct creates a tab of Bugsnag meta-data.
|
|
||||||
// The struct will be converted to an Object using the
|
|
||||||
// reflect library so any private fields will not be exported.
|
|
||||||
// As a safety measure, if you pass a non-struct the value will be
|
|
||||||
// sent to Bugsnag under the "Extra data" tab.
|
|
||||||
func (meta MetaData) AddStruct(tab string, obj interface{}) {
|
|
||||||
val := sanitizer{}.Sanitize(obj)
|
|
||||||
content, ok := val.(map[string]interface{})
|
|
||||||
if ok {
|
|
||||||
meta[tab] = content
|
|
||||||
} else {
|
|
||||||
// Wasn't a struct
|
|
||||||
meta.Add("Extra data", tab, obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove any values from meta-data that have keys matching the filters,
|
|
||||||
// and any that are recursive data-structures
|
|
||||||
func (meta MetaData) sanitize(filters []string) interface{} {
|
|
||||||
return sanitizer{
|
|
||||||
Filters: filters,
|
|
||||||
Seen: make([]interface{}, 0),
|
|
||||||
}.Sanitize(meta)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// The sanitizer is used to remove filtered params and recursion from meta-data.
|
|
||||||
type sanitizer struct {
|
|
||||||
Filters []string
|
|
||||||
Seen []interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s sanitizer) Sanitize(data interface{}) interface{} {
|
|
||||||
for _, s := range s.Seen {
|
|
||||||
// TODO: we don't need deep equal here, just type-ignoring equality
|
|
||||||
if reflect.DeepEqual(data, s) {
|
|
||||||
return "[RECURSION]"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanitizers are passed by value, so we can modify s and it only affects
|
|
||||||
// s.Seen for nested calls.
|
|
||||||
s.Seen = append(s.Seen, data)
|
|
||||||
|
|
||||||
t := reflect.TypeOf(data)
|
|
||||||
v := reflect.ValueOf(data)
|
|
||||||
|
|
||||||
if t == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Bool,
|
|
||||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
|
||||||
reflect.Float32, reflect.Float64:
|
|
||||||
return data
|
|
||||||
|
|
||||||
case reflect.String:
|
|
||||||
return data
|
|
||||||
|
|
||||||
case reflect.Interface, reflect.Ptr:
|
|
||||||
return s.Sanitize(v.Elem().Interface())
|
|
||||||
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
ret := make([]interface{}, v.Len())
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
ret[i] = s.Sanitize(v.Index(i).Interface())
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
return s.sanitizeMap(v)
|
|
||||||
|
|
||||||
case reflect.Struct:
|
|
||||||
return s.sanitizeStruct(v, t)
|
|
||||||
|
|
||||||
// Things JSON can't serialize:
|
|
||||||
// case t.Chan, t.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
|
|
||||||
default:
|
|
||||||
return "[" + t.String() + "]"
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s sanitizer) sanitizeMap(v reflect.Value) interface{} {
|
|
||||||
ret := make(map[string]interface{})
|
|
||||||
|
|
||||||
for _, key := range v.MapKeys() {
|
|
||||||
val := s.Sanitize(v.MapIndex(key).Interface())
|
|
||||||
newKey := fmt.Sprintf("%v", key.Interface())
|
|
||||||
|
|
||||||
if s.shouldRedact(newKey) {
|
|
||||||
val = "[REDACTED]"
|
|
||||||
}
|
|
||||||
|
|
||||||
ret[newKey] = val
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s sanitizer) sanitizeStruct(v reflect.Value, t reflect.Type) interface{} {
|
|
||||||
ret := make(map[string]interface{})
|
|
||||||
|
|
||||||
for i := 0; i < v.NumField(); i++ {
|
|
||||||
|
|
||||||
val := v.Field(i)
|
|
||||||
// Don't export private fields
|
|
||||||
if !val.CanInterface() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
name := t.Field(i).Name
|
|
||||||
var opts tagOptions
|
|
||||||
|
|
||||||
// Parse JSON tags. Supports name and "omitempty"
|
|
||||||
if jsonTag := t.Field(i).Tag.Get("json"); len(jsonTag) != 0 {
|
|
||||||
name, opts = parseTag(jsonTag)
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.shouldRedact(name) {
|
|
||||||
ret[name] = "[REDACTED]"
|
|
||||||
} else {
|
|
||||||
sanitized := s.Sanitize(val.Interface())
|
|
||||||
if str, ok := sanitized.(string); ok {
|
|
||||||
if !(opts.Contains("omitempty") && len(str) == 0) {
|
|
||||||
ret[name] = str
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ret[name] = sanitized
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s sanitizer) shouldRedact(key string) bool {
|
|
||||||
for _, filter := range s.Filters {
|
|
||||||
if strings.Contains(strings.ToLower(filter), strings.ToLower(key)) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
|
@ -1,96 +0,0 @@
|
||||||
package bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
beforeFunc func(*Event, *Configuration) error
|
|
||||||
|
|
||||||
// MiddlewareStacks keep middleware in the correct order. They are
|
|
||||||
// called in reverse order, so if you add a new middleware it will
|
|
||||||
// be called before all existing middleware.
|
|
||||||
middlewareStack struct {
|
|
||||||
before []beforeFunc
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// AddMiddleware adds a new middleware to the outside of the existing ones,
|
|
||||||
// when the middlewareStack is Run it will be run before all middleware that
|
|
||||||
// have been added before.
|
|
||||||
func (stack *middlewareStack) OnBeforeNotify(middleware beforeFunc) {
|
|
||||||
stack.before = append(stack.before, middleware)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run causes all the middleware to be run. If they all permit it the next callback
|
|
||||||
// will be called with all the middleware on the stack.
|
|
||||||
func (stack *middlewareStack) Run(event *Event, config *Configuration, next func() error) error {
|
|
||||||
// run all the before filters in reverse order
|
|
||||||
for i := range stack.before {
|
|
||||||
before := stack.before[len(stack.before)-i-1]
|
|
||||||
|
|
||||||
err := stack.runBeforeFilter(before, event, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return next()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stack *middlewareStack) runBeforeFilter(f beforeFunc, event *Event, config *Configuration) error {
|
|
||||||
defer func() {
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
config.log("bugsnag/middleware: unexpected panic: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return f(event, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// catchMiddlewarePanic is used to log any panics that happen inside Middleware,
|
|
||||||
// we wouldn't want to not notify Bugsnag in this case.
|
|
||||||
func catchMiddlewarePanic(event *Event, config *Configuration, next func() error) {
|
|
||||||
}
|
|
||||||
|
|
||||||
// httpRequestMiddleware is added OnBeforeNotify by default. It takes information
|
|
||||||
// from an http.Request passed in as rawData, and adds it to the Event. You can
|
|
||||||
// use this as a template for writing your own Middleware.
|
|
||||||
func httpRequestMiddleware(event *Event, config *Configuration) error {
|
|
||||||
for _, datum := range event.RawData {
|
|
||||||
if request, ok := datum.(*http.Request); ok {
|
|
||||||
proto := "http://"
|
|
||||||
if request.TLS != nil {
|
|
||||||
proto = "https://"
|
|
||||||
}
|
|
||||||
|
|
||||||
event.MetaData.Update(MetaData{
|
|
||||||
"Request": {
|
|
||||||
"RemoteAddr": request.RemoteAddr,
|
|
||||||
"Method": request.Method,
|
|
||||||
"Url": proto + request.Host + request.RequestURI,
|
|
||||||
"Params": request.URL.Query(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
// Add headers as a separate tab.
|
|
||||||
event.MetaData.AddStruct("Headers", request.Header)
|
|
||||||
|
|
||||||
// Default context to Path
|
|
||||||
if event.Context == "" {
|
|
||||||
event.Context = request.URL.Path
|
|
||||||
}
|
|
||||||
|
|
||||||
// Default user.id to IP so that users-affected works.
|
|
||||||
if event.User == nil {
|
|
||||||
ip := request.RemoteAddr
|
|
||||||
if idx := strings.LastIndex(ip, ":"); idx != -1 {
|
|
||||||
ip = ip[:idx]
|
|
||||||
}
|
|
||||||
event.User = &User{Id: ip}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
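Following the template above, a hedged sketch of a user-defined callback registered with OnBeforeNotify; the context value, tab name, and metadata values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/bugsnag/bugsnag-go"
)

func main() {
	bugsnag.OnBeforeNotify(func(event *bugsnag.Event, config *bugsnag.Configuration) error {
		// Returning an error stops this event from being sent.
		if event.Context == "/healthz" {
			return fmt.Errorf("ignoring health-check errors")
		}
		// Otherwise enrich the event before delivery.
		event.MetaData.Add("App", "region", "eu-west-1") // illustrative tab/key/value
		return nil
	})

	bugsnag.Notify(fmt.Errorf("boom"), bugsnag.Context{String: "/checkout"})
}
```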
|
|
|
@ -1,95 +0,0 @@
|
||||||
package bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/bugsnag/bugsnag-go/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Notifier sends errors to Bugsnag.
|
|
||||||
type Notifier struct {
|
|
||||||
Config *Configuration
|
|
||||||
RawData []interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new notifier.
|
|
||||||
// You can pass an instance of bugsnag.Configuration in rawData to change the configuration.
|
|
||||||
// Other values of rawData will be passed to Notify.
|
|
||||||
func New(rawData ...interface{}) *Notifier {
|
|
||||||
config := Config.clone()
|
|
||||||
for i, datum := range rawData {
|
|
||||||
if c, ok := datum.(Configuration); ok {
|
|
||||||
config.update(&c)
|
|
||||||
rawData[i] = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Notifier{
|
|
||||||
Config: config,
|
|
||||||
RawData: rawData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Notify sends an error to Bugsnag. Any rawData you pass here will be sent to
|
|
||||||
// Bugsnag after being converted to JSON. e.g. bugsnag.SeverityError, bugsnag.Context,
|
|
||||||
// or bugsnag.MetaData.
|
|
||||||
func (notifier *Notifier) Notify(err error, rawData ...interface{}) (e error) {
|
|
||||||
event, config := newEvent(errors.New(err, 1), rawData, notifier)
|
|
||||||
|
|
||||||
// Never block, start throwing away errors if we have too many.
|
|
||||||
e = middleware.Run(event, config, func() error {
|
|
||||||
config.log("notifying bugsnag: %s", event.Message)
|
|
||||||
if config.notifyInReleaseStage() {
|
|
||||||
if config.Synchronous {
|
|
||||||
return (&payload{event, config}).deliver()
|
|
||||||
}
|
|
||||||
go (&payload{event, config}).deliver()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("not notifying in %s", config.ReleaseStage)
|
|
||||||
})
|
|
||||||
|
|
||||||
if e != nil {
|
|
||||||
config.log("bugsnag.Notify: %v", e)
|
|
||||||
}
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// AutoNotify notifies Bugsnag of any panics, then repanics.
|
|
||||||
// It sends along any rawData that gets passed in.
|
|
||||||
// Usage: defer AutoNotify()
|
|
||||||
func (notifier *Notifier) AutoNotify(rawData ...interface{}) {
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
rawData = notifier.addDefaultSeverity(rawData, SeverityError)
|
|
||||||
notifier.Notify(errors.New(err, 2), rawData...)
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recover logs any panics, then recovers.
|
|
||||||
// It sends along any rawData that gets passed in.
|
|
||||||
// Usage: defer Recover()
|
|
||||||
func (notifier *Notifier) Recover(rawData ...interface{}) {
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
rawData = notifier.addDefaultSeverity(rawData, SeverityWarning)
|
|
||||||
notifier.Notify(errors.New(err, 2), rawData...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (notifier *Notifier) dontPanic() {
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
notifier.Config.log("bugsnag/notifier.Notify: panic! %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add a severity to raw data only if the default is not set.
|
|
||||||
func (notifier *Notifier) addDefaultSeverity(rawData []interface{}, s severity) []interface{} {
|
|
||||||
|
|
||||||
for _, datum := range append(notifier.RawData, rawData...) {
|
|
||||||
if _, ok := datum.(severity); ok {
|
|
||||||
return rawData
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return append(rawData, s)
|
|
||||||
}
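A short sketch of creating a scoped Notifier with New, as described above, so that a Configuration override and some default rawData travel with every call it makes; the values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/bugsnag/bugsnag-go"
)

func main() {
	// Configuration passed to New applies to this notifier only;
	// the remaining rawData is attached to every Notify call below.
	notifier := bugsnag.New(
		bugsnag.Configuration{ReleaseStage: "staging", Synchronous: true},
		bugsnag.Context{String: "worker/emails"},
	)

	if err := doWork(); err != nil {
		notifier.Notify(err, bugsnag.SeverityWarning)
	}
}

func doWork() error {
	return fmt.Errorf("smtp timeout") // illustrative failure
}
```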
|
|
|
@ -1,27 +0,0 @@
|
||||||
// +build !appengine
|
|
||||||
|
|
||||||
package bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/bugsnag/panicwrap"
|
|
||||||
"github.com/bugsnag/bugsnag-go/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NOTE: this function does not return when you call it, instead it
|
|
||||||
// re-exec()s the current process with panic monitoring.
|
|
||||||
func defaultPanicHandler() {
|
|
||||||
defer defaultNotifier.dontPanic()
|
|
||||||
|
|
||||||
err := panicwrap.BasicMonitor(func(output string) {
|
|
||||||
toNotify, err := errors.ParsePanic(output)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err)
|
|
||||||
}
|
|
||||||
Notify(toNotify, SeverityError, Configuration{Synchronous: true})
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,96 +0,0 @@
|
||||||
package bugsnag
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
type payload struct {
|
|
||||||
*Event
|
|
||||||
*Configuration
|
|
||||||
}
|
|
||||||
|
|
||||||
type hash map[string]interface{}
|
|
||||||
|
|
||||||
func (p *payload) deliver() error {
|
|
||||||
|
|
||||||
if len(p.APIKey) != 32 {
|
|
||||||
return fmt.Errorf("bugsnag/payload.deliver: invalid api key")
|
|
||||||
}
|
|
||||||
|
|
||||||
buf, err := json.Marshal(p)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("bugsnag/payload.deliver: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
client := http.Client{
|
|
||||||
Transport: p.Transport,
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := client.Post(p.Endpoint, "application/json", bytes.NewBuffer(buf))
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("bugsnag/payload.deliver: %v", err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode != 200 {
|
|
||||||
return fmt.Errorf("bugsnag/payload.deliver: Got HTTP %s\n", resp.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *payload) MarshalJSON() ([]byte, error) {
|
|
||||||
|
|
||||||
data := hash{
|
|
||||||
"apiKey": p.APIKey,
|
|
||||||
|
|
||||||
"notifier": hash{
|
|
||||||
"name": "Bugsnag Go",
|
|
||||||
"url": "https://github.com/bugsnag/bugsnag-go",
|
|
||||||
"version": VERSION,
|
|
||||||
},
|
|
||||||
|
|
||||||
"events": []hash{
|
|
||||||
{
|
|
||||||
"payloadVersion": "2",
|
|
||||||
"exceptions": []hash{
|
|
||||||
{
|
|
||||||
"errorClass": p.ErrorClass,
|
|
||||||
"message": p.Message,
|
|
||||||
"stacktrace": p.Stacktrace,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"severity": p.Severity.String,
|
|
||||||
"app": hash{
|
|
||||||
"releaseStage": p.ReleaseStage,
|
|
||||||
},
|
|
||||||
"user": p.User,
|
|
||||||
"metaData": p.MetaData.sanitize(p.ParamsFilters),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
event := data["events"].([]hash)[0]
|
|
||||||
|
|
||||||
if p.Context != "" {
|
|
||||||
event["context"] = p.Context
|
|
||||||
}
|
|
||||||
if p.GroupingHash != "" {
|
|
||||||
event["groupingHash"] = p.GroupingHash
|
|
||||||
}
|
|
||||||
if p.Hostname != "" {
|
|
||||||
event["device"] = hash{
|
|
||||||
"hostname": p.Hostname,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if p.AppVersion != "" {
|
|
||||||
event["app"].(hash)["version"] = p.AppVersion
|
|
||||||
}
|
|
||||||
return json.Marshal(data)
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,20 +0,0 @@
|
||||||
Copyright (c) 2012 Daniel Theophanes
|
|
||||||
|
|
||||||
This software is provided 'as-is', without any express or implied
|
|
||||||
warranty. In no event will the authors be held liable for any damages
|
|
||||||
arising from the use of this software.
|
|
||||||
|
|
||||||
Permission is granted to anyone to use this software for any purpose,
|
|
||||||
including commercial applications, and to alter it and redistribute it
|
|
||||||
freely, subject to the following restrictions:
|
|
||||||
|
|
||||||
1. The origin of this software must not be misrepresented; you must not
|
|
||||||
claim that you wrote the original software. If you use this software
|
|
||||||
in a product, an acknowledgment in the product documentation would be
|
|
||||||
appreciated but is not required.
|
|
||||||
|
|
||||||
2. Altered source versions must be plainly marked as such, and must not be
|
|
||||||
misrepresented as being the original software.
|
|
||||||
|
|
||||||
3. This notice may not be removed or altered from any source
|
|
||||||
distribution.
|
|
|
@ -1,32 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Extensions to the standard "os" package.
|
|
||||||
package osext
|
|
||||||
|
|
||||||
import "path/filepath"
|
|
||||||
|
|
||||||
// Executable returns an absolute path that can be used to
|
|
||||||
// re-invoke the current program.
|
|
||||||
// It may not be valid after the current program exits.
|
|
||||||
func Executable() (string, error) {
|
|
||||||
p, err := executable()
|
|
||||||
return filepath.Clean(p), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns same path as Executable, returns just the folder
|
|
||||||
// path. Excludes the executable name.
|
|
||||||
func ExecutableFolder() (string, error) {
|
|
||||||
p, err := Executable()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
folder, _ := filepath.Split(p)
|
|
||||||
return folder, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Depricated. Same as Executable().
|
|
||||||
func GetExePath() (exePath string, err error) {
|
|
||||||
return Executable()
|
|
||||||
}
|
|
|
@ -1,16 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package osext
|
|
||||||
|
|
||||||
import "syscall"
|
|
||||||
|
|
||||||
func executable() (string, error) {
|
|
||||||
f, err := Open("/proc/" + itoa(Getpid()) + "/text")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
return syscall.Fd2path(int(f.Fd()))
|
|
||||||
}
|
|
|
@ -1,25 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build linux netbsd openbsd
|
|
||||||
|
|
||||||
package osext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
func executable() (string, error) {
|
|
||||||
switch runtime.GOOS {
|
|
||||||
case "linux":
|
|
||||||
return os.Readlink("/proc/self/exe")
|
|
||||||
case "netbsd":
|
|
||||||
return os.Readlink("/proc/curproc/exe")
|
|
||||||
case "openbsd":
|
|
||||||
return os.Readlink("/proc/curproc/file")
|
|
||||||
}
|
|
||||||
return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
|
|
||||||
}
|
|
|
@ -1,64 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build darwin freebsd
|
|
||||||
|
|
||||||
package osext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
var startUpcwd, getwdError = os.Getwd()
|
|
||||||
|
|
||||||
func executable() (string, error) {
|
|
||||||
var mib [4]int32
|
|
||||||
switch runtime.GOOS {
|
|
||||||
case "freebsd":
|
|
||||||
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
|
|
||||||
case "darwin":
|
|
||||||
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
|
|
||||||
}
|
|
||||||
|
|
||||||
n := uintptr(0)
|
|
||||||
// get length
|
|
||||||
_, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
|
||||||
if err != 0 {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if n == 0 { // shouldn't happen
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
buf := make([]byte, n)
|
|
||||||
_, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
|
|
||||||
if err != 0 {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if n == 0 { // shouldn't happen
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
for i, v := range buf {
|
|
||||||
if v == 0 {
|
|
||||||
buf = buf[:i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if buf[0] != '/' {
|
|
||||||
if getwdError != nil {
|
|
||||||
return string(buf), getwdError
|
|
||||||
} else {
|
|
||||||
if buf[0] == '.' {
|
|
||||||
buf = buf[1:]
|
|
||||||
}
|
|
||||||
if startUpcwd[len(startUpcwd)-1] != '/' {
|
|
||||||
return startUpcwd + "/" + string(buf), nil
|
|
||||||
}
|
|
||||||
return startUpcwd + string(buf), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
|
@ -1,34 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package osext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"unicode/utf16"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
kernel = syscall.MustLoadDLL("kernel32.dll")
|
|
||||||
getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetModuleFileName() with hModule = NULL
|
|
||||||
func executable() (exePath string, err error) {
|
|
||||||
return getModuleFileName()
|
|
||||||
}
|
|
||||||
|
|
||||||
func getModuleFileName() (string, error) {
|
|
||||||
var n uint32
|
|
||||||
b := make([]uint16, syscall.MAX_PATH)
|
|
||||||
size := uint32(len(b))
|
|
||||||
|
|
||||||
r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
|
|
||||||
n = uint32(r0)
|
|
||||||
if n == 0 {
|
|
||||||
return "", e1
|
|
||||||
}
|
|
||||||
return string(utf16.Decode(b[0:n])), nil
|
|
||||||
}
|
|
|
@ -1,21 +0,0 @@
|
||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2013 Mitchell Hashimoto
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
@ -1,101 +0,0 @@
|
||||||
# panicwrap
|
|
||||||
|
|
||||||
panicwrap is a Go library that re-executes a Go binary and monitors stderr
|
|
||||||
output from the binary for a panic. When it find a panic, it executes a
|
|
||||||
user-defined handler function. Stdout, stderr, stdin, signals, and exit
|
|
||||||
codes continue to work as normal, making the existence of panicwrap mostly
|
|
||||||
invisble to the end user until a panic actually occurs.
|
|
||||||
|
|
||||||
Since a panic is truly a bug in the program meant to crash the runtime,
|
|
||||||
globally catching panics within Go applications is not supposed to be possible.
|
|
||||||
Despite this, it is often useful to have a way to know when panics occur.
|
|
||||||
panicwrap allows you to do something with these panics, such as writing them
|
|
||||||
to a file, so that you can track when panics occur.
|
|
||||||
|
|
||||||
panicwrap is ***not a panic recovery system***. Panics indicate serious
|
|
||||||
problems with your application and _should_ crash the runtime. panicwrap
|
|
||||||
is just meant as a way to monitor for panics. If you still think this is
|
|
||||||
the worst idea ever, read the section below on why.
|
|
||||||
|
|
||||||
## Features
|
|
||||||
|
|
||||||
* **SIMPLE!**
|
|
||||||
* Works with all Go applications on all platforms Go supports
|
|
||||||
* Custom behavior when a panic occurs
|
|
||||||
* Stdout, stderr, stdin, exit codes, and signals continue to work as
|
|
||||||
expected.
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
Using panicwrap is simple. It behaves a lot like `fork`, if you know
|
|
||||||
how that works. A basic example is shown below.
|
|
||||||
|
|
||||||
Because it would be sad to panic while capturing a panic, it is recommended
|
|
||||||
that the handler functions for panicwrap remain relatively simple and well
|
|
||||||
tested. panicwrap itself contains many tests.
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/mitchellh/panicwrap"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
exitStatus, err := panicwrap.BasicWrap(panicHandler)
|
|
||||||
if err != nil {
|
|
||||||
// Something went wrong setting up the panic wrapper. Unlikely,
|
|
||||||
// but possible.
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If exitStatus >= 0, then we're the parent process and the panicwrap
|
|
||||||
// re-executed ourselves and completed. Just exit with the proper status.
|
|
||||||
if exitStatus >= 0 {
|
|
||||||
os.Exit(exitStatus)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise, exitStatus < 0 means we're the child. Continue executing as
|
|
||||||
// normal...
|
|
||||||
|
|
||||||
// Let's say we panic
|
|
||||||
panic("oh shucks")
|
|
||||||
}
|
|
||||||
|
|
||||||
func panicHandler(output string) {
|
|
||||||
// output contains the full output (including stack traces) of the
|
|
||||||
// panic. Put it in a file or something.
|
|
||||||
fmt.Printf("The child panicked:\n\n%s\n", output)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## How Does it Work?
|
|
||||||
|
|
||||||
panicwrap works by re-executing the running program (retaining arguments,
|
|
||||||
environmental variables, etc.) and monitoring the stderr of the program.
|
|
||||||
Since Go always outputs panics in a predictable way with a predictable
|
|
||||||
exit code, panicwrap is able to reliably detect panics and allow the parent
|
|
||||||
process to handle them.
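
As a rough illustration of that detection step, the sketch below runs a child
process and scans its stderr for the `panic:` prefix the Go runtime prints
before a stack trace. It is a simplified stand-in, not panicwrap's actual code
(which also handles timing, exit codes, and buffering), and the package and
function names are made up for the example.

```go
// Package panicdemo is a stripped-down illustration of the idea behind
// panicwrap: run a child process and watch its stderr for "panic:".
package panicdemo

import (
	"bufio"
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// RunAndWatch runs the named binary, mirrors its stderr, and calls handler
// with any stderr line that starts with "panic:".
func RunAndWatch(handler func(string), name string, args ...string) error {
	cmd := exec.Command(name, args...)
	cmd.Stdout = os.Stdout

	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}

	scanner := bufio.NewScanner(stderr)
	for scanner.Scan() {
		line := scanner.Text()
		fmt.Fprintln(os.Stderr, line) // keep the wrapper invisible to the user
		if strings.HasPrefix(line, "panic:") {
			handler(line)
		}
	}
	return cmd.Wait()
}
```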
|
|
||||||
|
|
||||||
## WHY?! Panics should CRASH!
|
|
||||||
|
|
||||||
Yes, panics _should_ crash. They are 100% always indicative of bugs.
|
|
||||||
However, in some cases, such as user-facing programs (programs like
|
|
||||||
[Packer](http://github.com/mitchellh/packer) or
|
|
||||||
[Docker](http://github.com/dotcloud/docker)), it is up to the user to
|
|
||||||
report such panics. This is unreliable, at best, and it would be better if the
|
|
||||||
program could have a way to automatically report panics. panicwrap provides
|
|
||||||
a way to do this.
|
|
||||||
|
|
||||||
For backend applications, it is easier to detect crashes (since the application
|
|
||||||
exits). However, it is still nice sometimes to more intelligently log
|
|
||||||
panics in some way. For example, at [HashiCorp](http://www.hashicorp.com),
|
|
||||||
we use panicwrap to log panics to timestamped files with some additional
|
|
||||||
data (configuration settings at the time, environmental variables, etc.)
|
|
||||||
|
|
||||||
The goal of panicwrap is _not_ to hide panics. It is instead to provide
|
|
||||||
a clean mechanism for handling them before bubbling them up to the user
|
|
||||||
and ultimately crashing.
|
|
|
@@ -1,11 +0,0 @@
|
||||||
// +build darwin dragonfly freebsd linux,!arm64 netbsd openbsd
|
|
||||||
|
|
||||||
package panicwrap
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
func dup2(oldfd, newfd int) error {
|
|
||||||
return syscall.Dup2(oldfd, newfd)
|
|
||||||
}
|
|
|
@@ -1,11 +0,0 @@
|
||||||
// +build linux,arm64
|
|
||||||
|
|
||||||
package panicwrap
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
func dup2(oldfd, newfd int) error {
|
|
||||||
return syscall.Dup3(oldfd, newfd, 0)
|
|
||||||
}
|
|
|
@@ -1,62 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package panicwrap
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/bugsnag/osext"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
)
|
|
||||||
|
|
||||||
func monitor(c *WrapConfig) (int, error) {
|
|
||||||
|
|
||||||
// If we're the child process, absorb panics.
|
|
||||||
if Wrapped(c) {
|
|
||||||
panicCh := make(chan string)
|
|
||||||
|
|
||||||
go trackPanic(os.Stdin, os.Stderr, c.DetectDuration, panicCh)
|
|
||||||
|
|
||||||
// Wait on the panic data
|
|
||||||
panicTxt := <-panicCh
|
|
||||||
if panicTxt != "" {
|
|
||||||
if !c.HidePanic {
|
|
||||||
os.Stderr.Write([]byte(panicTxt))
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Handler(panicTxt)
|
|
||||||
}
|
|
||||||
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
exePath, err := osext.Executable()
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
cmd := exec.Command(exePath, os.Args[1:]...)
|
|
||||||
|
|
||||||
read, write, err := os.Pipe()
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd.Stdin = read
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
err = cmd.Start()
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = dup2(int(write.Fd()), int(os.Stderr.Fd()))
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return -1, nil
|
|
||||||
}
|
|
|
@@ -1,7 +0,0 @@
|
||||||
package panicwrap
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
func monitor(c *WrapConfig) (int, error) {
|
|
||||||
return -1, fmt.Errorf("Monitor is not supported on windows")
|
|
||||||
}
|
|
|
@@ -1,339 +0,0 @@
|
||||||
// The panicwrap package provides functions for capturing and handling
|
|
||||||
// panics in your application. It does this by re-executing the running
|
|
||||||
// application and monitoring stderr for any panics. At the same time,
|
|
||||||
// stdout/stderr/etc. are set to the same values so that data is shuttled
|
|
||||||
// through properly, making the existence of panicwrap mostly transparent.
|
|
||||||
//
|
|
||||||
// Panics are only detected when the subprocess exits with a non-zero
|
|
||||||
// exit status, since this is the only time panics are real. Otherwise,
|
|
||||||
// "panic-like" output is ignored.
|
|
||||||
package panicwrap
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"github.com/bugsnag/osext"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"os/signal"
|
|
||||||
"runtime"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
DEFAULT_COOKIE_KEY = "cccf35992f8f3cd8d1d28f0109dd953e26664531"
|
|
||||||
DEFAULT_COOKIE_VAL = "7c28215aca87789f95b406b8dd91aa5198406750"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HandlerFunc is the type called when a panic is detected.
|
|
||||||
type HandlerFunc func(string)
|
|
||||||
|
|
||||||
// WrapConfig is the configuration for panicwrap when wrapping an existing
|
|
||||||
// binary. To get started, in general, you only need the BasicWrap function
|
|
||||||
// that will set this up for you. However, for more customizability,
|
|
||||||
// WrapConfig and Wrap can be used.
|
|
||||||
type WrapConfig struct {
|
|
||||||
// Handler is the function called when a panic occurs.
|
|
||||||
Handler HandlerFunc
|
|
||||||
|
|
||||||
// The cookie key and value are used within environmental variables
|
|
||||||
// to tell the child process that it is already executing so that
|
|
||||||
// wrap doesn't re-wrap itself.
|
|
||||||
CookieKey string
|
|
||||||
CookieValue string
|
|
||||||
|
|
||||||
// If true, the panic will not be mirrored to the configured writer
|
|
||||||
// and will instead ONLY go to the handler. This lets you effectively
|
|
||||||
// hide panics from the end user. This is not recommended because if
|
|
||||||
// your handler fails, the panic is effectively lost.
|
|
||||||
HidePanic bool
|
|
||||||
|
|
||||||
// If true, panicwrap will boot a monitor sub-process and let the parent
|
|
||||||
// run the app. This mode is useful for processes run under supervisors
|
|
||||||
// like runit as signals get sent to the correct codebase. This is not
|
|
||||||
// supported when GOOS=windows, and ignores c.Stderr and c.Stdout.
|
|
||||||
Monitor bool
|
|
||||||
|
|
||||||
// The amount of time that a process must exit within after detecting
|
|
||||||
// a panic header for panicwrap to assume it is a panic. Defaults to
|
|
||||||
// 300 milliseconds.
|
|
||||||
DetectDuration time.Duration
|
|
||||||
|
|
||||||
// The writer to send the stderr to. If this is nil, then it defaults
|
|
||||||
// to os.Stderr.
|
|
||||||
Writer io.Writer
|
|
||||||
|
|
||||||
// The writer to send stdout to. If this is nil, then it defaults to
|
|
||||||
// os.Stdout.
|
|
||||||
Stdout io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// BasicWrap calls Wrap with the given handler function, using defaults
|
|
||||||
// for everything else. See Wrap and WrapConfig for more information on
|
|
||||||
// functionality and return values.
|
|
||||||
func BasicWrap(f HandlerFunc) (int, error) {
|
|
||||||
return Wrap(&WrapConfig{
|
|
||||||
Handler: f,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// BasicMonitor calls Wrap with Monitor set to true on supported platforms.
|
|
||||||
// It forks your program and runs it again from the start. In one process
|
|
||||||
// BasicMonitor never returns, it just listens on stderr of the other process,
|
|
||||||
// and calls your handler when a panic is seen. In the other it either returns
|
|
||||||
// nil to indicate that the panic monitoring is enabled, or an error to indicate
|
|
||||||
// that something else went wrong.
|
|
||||||
func BasicMonitor(f HandlerFunc) error {
|
|
||||||
exitStatus, err := Wrap(&WrapConfig{
|
|
||||||
Handler: f,
|
|
||||||
Monitor: runtime.GOOS != "windows",
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if exitStatus >= 0 {
|
|
||||||
os.Exit(exitStatus)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrap wraps the current executable in a handler to catch panics. It
|
|
||||||
// returns an error if there was an error during the wrapping process.
|
|
||||||
// If the error is nil, then the int result indicates the exit status of the
|
|
||||||
// child process. If the exit status is -1, then this is the child process,
|
|
||||||
// and execution should continue as normal. Otherwise, this is the parent
|
|
||||||
// process and the child successfully ran already, and you should exit the
|
|
||||||
// process with the returned exit status.
|
|
||||||
//
|
|
||||||
// This function should be called very very early in your program's execution.
|
|
||||||
// Ideally, this runs as the first line of code of main.
|
|
||||||
//
|
|
||||||
// Once this is called, the given WrapConfig shouldn't be modified or used
|
|
||||||
// any further.
|
|
||||||
func Wrap(c *WrapConfig) (int, error) {
|
|
||||||
if c.Handler == nil {
|
|
||||||
return -1, errors.New("Handler must be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.DetectDuration == 0 {
|
|
||||||
c.DetectDuration = 300 * time.Millisecond
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Writer == nil {
|
|
||||||
c.Writer = os.Stderr
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Monitor {
|
|
||||||
return monitor(c)
|
|
||||||
} else {
|
|
||||||
return wrap(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func wrap(c *WrapConfig) (int, error) {
|
|
||||||
|
|
||||||
// If we're already wrapped, exit out.
|
|
||||||
if Wrapped(c) {
|
|
||||||
return -1, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the path to our current executable
|
|
||||||
exePath, err := osext.Executable()
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pipe the stderr so we can read all the data as we look for panics
|
|
||||||
stderr_r, stderr_w := io.Pipe()
|
|
||||||
|
|
||||||
// doneCh is closed when we're done, signaling any other goroutines
|
|
||||||
// to end immediately.
|
|
||||||
doneCh := make(chan struct{})
|
|
||||||
|
|
||||||
// panicCh is the channel on which the panic text will actually be
|
|
||||||
// sent.
|
|
||||||
panicCh := make(chan string)
|
|
||||||
|
|
||||||
// On close, make sure to finish off the copying of data to stderr
|
|
||||||
defer func() {
|
|
||||||
defer close(doneCh)
|
|
||||||
stderr_w.Close()
|
|
||||||
<-panicCh
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Start the goroutine that will watch stderr for any panics
|
|
||||||
go trackPanic(stderr_r, c.Writer, c.DetectDuration, panicCh)
|
|
||||||
|
|
||||||
// Create the writer for stdout that we're going to use
|
|
||||||
var stdout_w io.Writer = os.Stdout
|
|
||||||
if c.Stdout != nil {
|
|
||||||
stdout_w = c.Stdout
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build a subcommand to re-execute ourselves. We make sure to
|
|
||||||
// set the environmental variable to include our cookie. We also
|
|
||||||
// set stdin/stdout to match the config. Finally, we pipe stderr
|
|
||||||
// through ourselves in order to watch for panics.
|
|
||||||
cmd := exec.Command(exePath, os.Args[1:]...)
|
|
||||||
cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue)
|
|
||||||
cmd.Stdin = os.Stdin
|
|
||||||
cmd.Stdout = stdout_w
|
|
||||||
cmd.Stderr = stderr_w
|
|
||||||
if err := cmd.Start(); err != nil {
|
|
||||||
return 1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Listen to signals and capture them forever. We allow the child
|
|
||||||
// process to handle them in some way.
|
|
||||||
sigCh := make(chan os.Signal)
|
|
||||||
signal.Notify(sigCh, os.Interrupt)
|
|
||||||
go func() {
|
|
||||||
defer signal.Stop(sigCh)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-doneCh:
|
|
||||||
return
|
|
||||||
case <-sigCh:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err := cmd.Wait(); err != nil {
|
|
||||||
exitErr, ok := err.(*exec.ExitError)
|
|
||||||
if !ok {
|
|
||||||
// This is some other kind of subprocessing error.
|
|
||||||
return 1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
exitStatus := 1
|
|
||||||
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
|
|
||||||
exitStatus = status.ExitStatus()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close the writer end so that the tracker goroutine ends at some point
|
|
||||||
stderr_w.Close()
|
|
||||||
|
|
||||||
// Wait on the panic data
|
|
||||||
panicTxt := <-panicCh
|
|
||||||
if panicTxt != "" {
|
|
||||||
if !c.HidePanic {
|
|
||||||
c.Writer.Write([]byte(panicTxt))
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Handler(panicTxt)
|
|
||||||
}
|
|
||||||
|
|
||||||
return exitStatus, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapped checks if we're already wrapped according to the configuration
|
|
||||||
// given.
|
|
||||||
//
|
|
||||||
// Wrapped is very cheap and can be used early to short-circuit some pre-wrap
|
|
||||||
// logic your application may have.
|
|
||||||
func Wrapped(c *WrapConfig) bool {
|
|
||||||
if c.CookieKey == "" {
|
|
||||||
c.CookieKey = DEFAULT_COOKIE_KEY
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.CookieValue == "" {
|
|
||||||
c.CookieValue = DEFAULT_COOKIE_VAL
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the cookie key/value match our environment, then we are the
|
|
||||||
// child, so just exit now and tell the caller that we're the child
|
|
||||||
return os.Getenv(c.CookieKey) == c.CookieValue
|
|
||||||
}
|
|
||||||
|
|
||||||
// trackPanic monitors the given reader for a panic. If a panic is detected,
|
|
||||||
// it is outputted on the result channel. This will close the channel once
|
|
||||||
// it is complete.
|
|
||||||
func trackPanic(r io.Reader, w io.Writer, dur time.Duration, result chan<- string) {
|
|
||||||
defer close(result)
|
|
||||||
|
|
||||||
var panicTimer <-chan time.Time
|
|
||||||
panicBuf := new(bytes.Buffer)
|
|
||||||
panicHeader := []byte("panic:")
|
|
||||||
|
|
||||||
tempBuf := make([]byte, 2048)
|
|
||||||
for {
|
|
||||||
var buf []byte
|
|
||||||
var n int
|
|
||||||
|
|
||||||
if panicTimer == nil && panicBuf.Len() > 0 {
|
|
||||||
// We're not tracking a panic but the buffer length is
|
|
||||||
// greater than 0. We need to clear out that buffer, but
|
|
||||||
// look for another panic along the way.
|
|
||||||
|
|
||||||
// First, remove the previous panic header so we don't loop
|
|
||||||
w.Write(panicBuf.Next(len(panicHeader)))
|
|
||||||
|
|
||||||
// Next, assume that this is our new buffer to inspect
|
|
||||||
n = panicBuf.Len()
|
|
||||||
buf = make([]byte, n)
|
|
||||||
copy(buf, panicBuf.Bytes())
|
|
||||||
panicBuf.Reset()
|
|
||||||
} else {
|
|
||||||
var err error
|
|
||||||
buf = tempBuf
|
|
||||||
n, err = r.Read(buf)
|
|
||||||
if n <= 0 && err == io.EOF {
|
|
||||||
if panicBuf.Len() > 0 {
|
|
||||||
// We were tracking a panic, assume it was a panic
|
|
||||||
// and return that as the result.
|
|
||||||
result <- panicBuf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if panicTimer != nil {
|
|
||||||
// We're tracking what we think is a panic right now.
|
|
||||||
// If the timer ended, then it is not a panic.
|
|
||||||
isPanic := true
|
|
||||||
select {
|
|
||||||
case <-panicTimer:
|
|
||||||
isPanic = false
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
// No matter what, buffer the text some more.
|
|
||||||
panicBuf.Write(buf[0:n])
|
|
||||||
|
|
||||||
if !isPanic {
|
|
||||||
// It isn't a panic, stop tracking. Clean-up will happen
|
|
||||||
// on the next iteration.
|
|
||||||
panicTimer = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
flushIdx := n
|
|
||||||
idx := bytes.Index(buf[0:n], panicHeader)
|
|
||||||
if idx >= 0 {
|
|
||||||
flushIdx = idx
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush to stderr what isn't a panic
|
|
||||||
w.Write(buf[0:flushIdx])
|
|
||||||
|
|
||||||
if idx < 0 {
|
|
||||||
// Not a panic so just continue along
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// We have a panic header. Write what we assume is a panic so far.
|
|
||||||
panicBuf.Write(buf[idx:n])
|
|
||||||
panicTimer = time.After(dur)
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -1,202 +0,0 @@
|
||||||
// Package auth defines a standard interface for request access controllers.
|
|
||||||
//
|
|
||||||
// An access controller has a simple interface with a single `Authorized`
|
|
||||||
// method which checks that a given request is authorized to perform one or
|
|
||||||
// more actions on one or more resources. This method should return a non-nil
|
|
||||||
// error if the request is not authorized.
|
|
||||||
//
|
|
||||||
// An implementation registers its access controller by name with a constructor
|
|
||||||
// which accepts an options map for configuring the access controller.
|
|
||||||
//
|
|
||||||
// options := map[string]interface{}{"sillySecret": "whysosilly?"}
|
|
||||||
// accessController, _ := auth.GetAccessController("silly", options)
|
|
||||||
//
|
|
||||||
// This `accessController` can then be used in a request handler like so:
|
|
||||||
//
|
|
||||||
// func updateOrder(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// orderNumber := r.FormValue("orderNumber")
|
|
||||||
// resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
|
|
||||||
// access := auth.Access{Resource: resource, Action: "update"}
|
|
||||||
//
|
|
||||||
// if ctx, err := accessController.Authorized(ctx, access); err != nil {
|
|
||||||
// if challenge, ok := err.(auth.Challenge) {
|
|
||||||
// // Let the challenge write the response.
|
|
||||||
// challenge.SetHeaders(w)
|
|
||||||
// w.WriteHeader(http.StatusUnauthorized)
|
|
||||||
// return
|
|
||||||
// } else {
|
|
||||||
// // Some other error.
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
package auth
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UserKey is used to get the user object from
|
|
||||||
// a user context
|
|
||||||
UserKey = "auth.user"
|
|
||||||
|
|
||||||
// UserNameKey is used to get the user name from
|
|
||||||
// a user context
|
|
||||||
UserNameKey = "auth.user.name"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
|
|
||||||
ErrInvalidCredential = errors.New("invalid authorization credential")
|
|
||||||
|
|
||||||
// ErrAuthenticationFailure returned when authentication fails.
|
|
||||||
ErrAuthenticationFailure = errors.New("authentication failure")
|
|
||||||
)
|
|
||||||
|
|
||||||
// UserInfo carries information about
|
|
||||||
// an authenticated/authorized client.
|
|
||||||
type UserInfo struct {
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resource describes a resource by type and name.
|
|
||||||
type Resource struct {
|
|
||||||
Type string
|
|
||||||
Class string
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Access describes a specific action that is
|
|
||||||
// requested or allowed for a given resource.
|
|
||||||
type Access struct {
|
|
||||||
Resource
|
|
||||||
Action string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Challenge is a special error type which is used for HTTP 401 Unauthorized
|
|
||||||
// responses and is able to write the response with WWW-Authenticate challenge
|
|
||||||
// header values based on the error.
|
|
||||||
type Challenge interface {
|
|
||||||
error
|
|
||||||
|
|
||||||
// SetHeaders prepares the request to conduct a challenge response by
|
|
||||||
// adding an HTTP challenge header on the response message. Callers
|
|
||||||
// are expected to set the appropriate HTTP status code (e.g. 401)
|
|
||||||
// themselves.
|
|
||||||
SetHeaders(w http.ResponseWriter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AccessController controls access to registry resources based on a request
|
|
||||||
// and required access levels for a request. Implementations can support both
|
|
||||||
// complete denial and http authorization challenges.
|
|
||||||
type AccessController interface {
|
|
||||||
// Authorized returns a non-nil error if the context is not granted access; otherwise it
|
|
||||||
// returns a new authorized context. If one or more Access structs are
|
|
||||||
// provided, the requested access will be compared with what is available
|
|
||||||
// to the context. The given context will contain a "http.request" key with
|
|
||||||
// a `*http.Request` value. If the error is non-nil, access should always
|
|
||||||
// be denied. The error may be of type Challenge, in which case the caller
|
|
||||||
// may have the Challenge handle the request or choose what action to take
|
|
||||||
// based on the Challenge header or response status. The returned context
|
|
||||||
// object should have an "auth.user" value set to a UserInfo struct.
|
|
||||||
Authorized(ctx context.Context, access ...Access) (context.Context, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CredentialAuthenticator is an object which is able to authenticate credentials
|
|
||||||
type CredentialAuthenticator interface {
|
|
||||||
AuthenticateUser(username, password string) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithUser returns a context with the authorized user info.
|
|
||||||
func WithUser(ctx context.Context, user UserInfo) context.Context {
|
|
||||||
return userInfoContext{
|
|
||||||
Context: ctx,
|
|
||||||
user: user,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type userInfoContext struct {
|
|
||||||
context.Context
|
|
||||||
user UserInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
func (uic userInfoContext) Value(key interface{}) interface{} {
|
|
||||||
switch key {
|
|
||||||
case UserKey:
|
|
||||||
return uic.user
|
|
||||||
case UserNameKey:
|
|
||||||
return uic.user.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
return uic.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithResources returns a context with the authorized resources.
|
|
||||||
func WithResources(ctx context.Context, resources []Resource) context.Context {
|
|
||||||
return resourceContext{
|
|
||||||
Context: ctx,
|
|
||||||
resources: resources,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type resourceContext struct {
|
|
||||||
context.Context
|
|
||||||
resources []Resource
|
|
||||||
}
|
|
||||||
|
|
||||||
type resourceKey struct{}
|
|
||||||
|
|
||||||
func (rc resourceContext) Value(key interface{}) interface{} {
|
|
||||||
if key == (resourceKey{}) {
|
|
||||||
return rc.resources
|
|
||||||
}
|
|
||||||
|
|
||||||
return rc.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AuthorizedResources returns the list of resources which have
|
|
||||||
// been authorized for this request.
|
|
||||||
func AuthorizedResources(ctx context.Context) []Resource {
|
|
||||||
if resources, ok := ctx.Value(resourceKey{}).([]Resource); ok {
|
|
||||||
return resources
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitFunc is the type of an AccessController factory function and is used
|
|
||||||
// to register the constructor for different AccessController backends.
|
|
||||||
type InitFunc func(options map[string]interface{}) (AccessController, error)
|
|
||||||
|
|
||||||
var accessControllers map[string]InitFunc
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
accessControllers = make(map[string]InitFunc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register is used to register an InitFunc for
|
|
||||||
// an AccessController backend with the given name.
|
|
||||||
func Register(name string, initFunc InitFunc) error {
|
|
||||||
if _, exists := accessControllers[name]; exists {
|
|
||||||
return fmt.Errorf("name already registered: %s", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
accessControllers[name] = initFunc
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAccessController constructs an AccessController
|
|
||||||
// with the given options using the named backend.
|
|
||||||
func GetAccessController(name string, options map[string]interface{}) (AccessController, error) {
|
|
||||||
if initFunc, exists := accessControllers[name]; exists {
|
|
||||||
return initFunc(options)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("no access controller registered with name: %s", name)
|
|
||||||
}
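
To complement the registration flow described in the package comment above, here is a
hedged sketch of a minimal access controller that registers itself under the "silly"
name used in the docs. The import paths and the allow-everything behaviour are
assumptions for illustration, not part of this vendored package.

```go
package silly

import (
	"github.com/docker/distribution/context"       // assumed import path
	"github.com/docker/distribution/registry/auth" // assumed import path for the package above
)

// accessController grants every request; purely illustrative.
type accessController struct{}

// Authorized attaches an anonymous user to the context and never denies access.
func (accessController) Authorized(ctx context.Context, access ...auth.Access) (context.Context, error) {
	return auth.WithUser(ctx, auth.UserInfo{Name: "anonymous"}), nil
}

func init() {
	// Register the constructor under the name used in the package docs.
	auth.Register("silly", func(options map[string]interface{}) (auth.AccessController, error) {
		return accessController{}, nil
	})
}
```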
|
|
|
@@ -1,3 +1,88 @@
|
||||||
|
/*
|
||||||
|
Package client implements everything required for interacting with a Notary repository.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
|
||||||
|
Use this package by creating a new repository object and calling methods on it.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/distribution/registry/client/auth"
|
||||||
|
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||||
|
"github.com/docker/distribution/registry/client/transport"
|
||||||
|
notary "github.com/docker/notary/client"
|
||||||
|
"github.com/docker/notary/trustpinning"
|
||||||
|
"github.com/docker/notary/tuf/data"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
rootDir := ".trust"
|
||||||
|
if err := os.MkdirAll(rootDir, 0700); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
server := "https://notary.docker.io"
|
||||||
|
image := "docker.io/library/alpine"
|
||||||
|
repo, err := notary.NewFileCachedNotaryRepository(
|
||||||
|
rootDir,
|
||||||
|
data.GUN(image),
|
||||||
|
server,
|
||||||
|
makeHubTransport(server, image),
|
||||||
|
nil,
|
||||||
|
trustpinning.TrustPinConfig{},
|
||||||
|
)
|
||||||
|
|
||||||
|
targets, err := repo.ListTargets()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tgt := range targets {
|
||||||
|
fmt.Printf("%s\t%s\n", tgt.Name, hex.EncodeToString(tgt.Hashes["sha256"]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeHubTransport(server, image string) http.RoundTripper {
|
||||||
|
base := http.DefaultTransport
|
||||||
|
modifiers := []transport.RequestModifier{
|
||||||
|
transport.NewHeaderRequestModifier(http.Header{
|
||||||
|
"User-Agent": []string{"my-client"},
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
|
||||||
|
authTransport := transport.NewTransport(base, modifiers...)
|
||||||
|
pingClient := &http.Client{
|
||||||
|
Transport: authTransport,
|
||||||
|
Timeout: 5 * time.Second,
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest("GET", server+"/v2/", nil)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
challengeManager := challenge.NewSimpleManager()
|
||||||
|
resp, err := pingClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if err := challengeManager.AddResponse(resp); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
tokenHandler := auth.NewTokenHandler(base, nil, image, "pull")
|
||||||
|
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, auth.NewBasicHandler(nil)))
|
||||||
|
|
||||||
|
return transport.NewTransport(base, modifiers...)
|
||||||
|
}
|
||||||
|
|
||||||
|
*/
|
||||||
package client
|
package client
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
|
@@ -206,10 +206,7 @@ func (f *FilesystemStore) Set(name string, meta []byte) error {
|
||||||
os.RemoveAll(fp)
|
os.RemoveAll(fp)
|
||||||
|
|
||||||
// Write the file to disk
|
// Write the file to disk
|
||||||
if err = ioutil.WriteFile(fp, meta, notary.PrivNoExecPerms); err != nil {
|
return ioutil.WriteFile(fp, meta, notary.PrivNoExecPerms)
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveAll clears the existing filestore by removing its base directory
|
// RemoveAll clears the existing filestore by removing its base directory
|
||||||
|
|
|
@@ -1,4 +1,4 @@
|
||||||
package utils
|
package trustmanager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/pem"
|
"encoding/pem"
|
|
@ -12,9 +12,24 @@ import (
|
||||||
|
|
||||||
// TrustPinConfig represents the configuration under the trust_pinning section of the config file
|
// TrustPinConfig represents the configuration under the trust_pinning section of the config file
|
||||||
// This struct represents the preferred way to bootstrap trust for this repository
|
// This struct represents the preferred way to bootstrap trust for this repository
|
||||||
|
// This is fully optional. If left at the default, uninitialized value, Notary will use TOFU over
|
||||||
|
// HTTPS.
|
||||||
|
// You can use this to provide certificates or a CA to pin to as a root of trust for a GUN.
|
||||||
|
// These are used with the following precedence:
|
||||||
|
//
|
||||||
|
// 1. Certs
|
||||||
|
// 2. CA
|
||||||
|
// 3. TOFUS (TOFU over HTTPS)
|
||||||
|
//
|
||||||
|
// Only one trust pinning option will be used to validate a particular GUN.
|
||||||
type TrustPinConfig struct {
|
type TrustPinConfig struct {
|
||||||
|
// CA maps a GUN prefix to file paths containing the root CA.
|
||||||
|
// This file can contain multiple root certificates, bundled in separate PEM blocks.
|
||||||
CA map[string]string
|
CA map[string]string
|
||||||
|
// Certs maps a GUN to a list of certificate IDs
|
||||||
Certs map[string][]string
|
Certs map[string][]string
|
||||||
|
// DisableTOFU, when true, disables "Trust On First Use" of new key data
|
||||||
|
// This is false by default, which means new key data will always be trusted the first time it is seen.
|
||||||
DisableTOFU bool
|
DisableTOFU bool
|
||||||
}
|
}
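
For illustration, a caller could combine the three pinning options like this. This is a
hedged sketch: the GUN prefixes, certificate ID placeholder, and CA path are invented,
and the `trustpinning` package path is assumed from the client example earlier in this diff.

```go
package example

import "github.com/docker/notary/trustpinning" // assumed import path

// examplePinConfig shows the precedence in practice: Certs beats CA,
// and CA beats TOFU over HTTPS for any GUN it does not cover.
func examplePinConfig() trustpinning.TrustPinConfig {
	return trustpinning.TrustPinConfig{
		// Pin one specific GUN to a known root certificate ID (hypothetical value).
		Certs: map[string][]string{
			"docker.io/library/alpine": {"<hex certificate ID>"},
		},
		// Pin everything under example.com/ to a local CA bundle (hypothetical path).
		CA: map[string]string{
			"example.com/": "/etc/notary/root-ca.crt",
		},
		// Leave TOFU enabled as the fallback for all other GUNs.
		DisableTOFU: false,
	}
}
```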
|
||||||
|
|
||||||
|
|
|
@@ -19,7 +19,9 @@ import (
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// GUN type for specifying gun
|
// GUN is a Globally Unique Name. It is used to identify trust collections.
|
||||||
|
// An example usage of this is for container image repositories.
|
||||||
|
// For example: myregistry.io/myuser/myimage
|
||||||
type GUN string
|
type GUN string
|
||||||
|
|
||||||
func (g GUN) String() string {
|
func (g GUN) String() string {
|
||||||
|
|
|
@@ -1,277 +0,0 @@
|
||||||
// Common configuration elements that may be reused
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
bugsnag_hook "github.com/Shopify/logrus-bugsnag"
|
|
||||||
"github.com/bugsnag/bugsnag-go"
|
|
||||||
"github.com/docker/go-connections/tlsconfig"
|
|
||||||
"github.com/go-sql-driver/mysql"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
|
|
||||||
"github.com/docker/notary"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Storage is a configuration about what storage backend a server should use
|
|
||||||
type Storage struct {
|
|
||||||
Backend string
|
|
||||||
Source string
|
|
||||||
}
|
|
||||||
|
|
||||||
// RethinkDBStorage is configuration about a RethinkDB backend service
|
|
||||||
type RethinkDBStorage struct {
|
|
||||||
Storage
|
|
||||||
CA string
|
|
||||||
Cert string
|
|
||||||
DBName string
|
|
||||||
Key string
|
|
||||||
Username string
|
|
||||||
Password string
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPathRelativeToConfig gets a configuration key which is a path, and if
|
|
||||||
// it is not empty or an absolute path, returns the absolute path relative
|
|
||||||
// to the configuration file
|
|
||||||
func GetPathRelativeToConfig(configuration *viper.Viper, key string) string {
|
|
||||||
configFile := configuration.ConfigFileUsed()
|
|
||||||
p := configuration.GetString(key)
|
|
||||||
if p == "" || filepath.IsAbs(p) {
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
return filepath.Clean(filepath.Join(filepath.Dir(configFile), p))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseServerTLS tries to parse out valid server TLS options from a Viper.
|
|
||||||
// The cert/key files are relative to the config file used to populate the instance
|
|
||||||
// of viper.
|
|
||||||
func ParseServerTLS(configuration *viper.Viper, tlsRequired bool) (*tls.Config, error) {
|
|
||||||
// unmarshalling into objects does not seem to pick up env vars
|
|
||||||
tlsOpts := tlsconfig.Options{
|
|
||||||
CertFile: GetPathRelativeToConfig(configuration, "server.tls_cert_file"),
|
|
||||||
KeyFile: GetPathRelativeToConfig(configuration, "server.tls_key_file"),
|
|
||||||
CAFile: GetPathRelativeToConfig(configuration, "server.client_ca_file"),
|
|
||||||
ExclusiveRootPools: true,
|
|
||||||
}
|
|
||||||
if tlsOpts.CAFile != "" {
|
|
||||||
tlsOpts.ClientAuth = tls.RequireAndVerifyClientCert
|
|
||||||
}
|
|
||||||
|
|
||||||
if !tlsRequired {
|
|
||||||
cert, key, ca := tlsOpts.CertFile, tlsOpts.KeyFile, tlsOpts.CAFile
|
|
||||||
if cert == "" && key == "" && ca == "" {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if (cert == "" && key != "") || (cert != "" && key == "") || (cert == "" && key == "" && ca != "") {
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"either include both a cert and key file, or no TLS information at all to disable TLS")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return tlsconfig.Server(tlsOpts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseLogLevel tries to parse out a log level from a Viper. If there is no
|
|
||||||
// configuration, defaults to the provided error level
|
|
||||||
func ParseLogLevel(configuration *viper.Viper, defaultLevel logrus.Level) (
|
|
||||||
logrus.Level, error) {
|
|
||||||
|
|
||||||
logStr := configuration.GetString("logging.level")
|
|
||||||
if logStr == "" {
|
|
||||||
return defaultLevel, nil
|
|
||||||
}
|
|
||||||
return logrus.ParseLevel(logStr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseSQLStorage tries to parse out Storage from a Viper. If backend and
|
|
||||||
// URL are not provided, returns a nil pointer. Storage is required (if
|
|
||||||
// a backend is not provided, an error will be returned.)
|
|
||||||
func ParseSQLStorage(configuration *viper.Viper) (*Storage, error) {
|
|
||||||
store := Storage{
|
|
||||||
Backend: configuration.GetString("storage.backend"),
|
|
||||||
Source: configuration.GetString("storage.db_url"),
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case store.Backend != notary.MySQLBackend && store.Backend != notary.SQLiteBackend && store.Backend != notary.PostgresBackend:
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"%s is not a supported SQL backend driver",
|
|
||||||
store.Backend,
|
|
||||||
)
|
|
||||||
case store.Source == "":
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"must provide a non-empty database source for %s",
|
|
||||||
store.Backend,
|
|
||||||
)
|
|
||||||
case store.Backend == notary.MySQLBackend:
|
|
||||||
urlConfig, err := mysql.ParseDSN(store.Source)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse the database source for %s",
|
|
||||||
store.Backend,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
urlConfig.ParseTime = true
|
|
||||||
store.Source = urlConfig.FormatDSN()
|
|
||||||
}
|
|
||||||
return &store, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseRethinkDBStorage tries to parse out Storage from a Viper. If backend and
|
|
||||||
// URL are not provided, returns a nil pointer. Storage is required (if
|
|
||||||
// a backend is not provided, an error will be returned.)
|
|
||||||
func ParseRethinkDBStorage(configuration *viper.Viper) (*RethinkDBStorage, error) {
|
|
||||||
store := RethinkDBStorage{
|
|
||||||
Storage: Storage{
|
|
||||||
Backend: configuration.GetString("storage.backend"),
|
|
||||||
Source: configuration.GetString("storage.db_url"),
|
|
||||||
},
|
|
||||||
CA: GetPathRelativeToConfig(configuration, "storage.tls_ca_file"),
|
|
||||||
Cert: GetPathRelativeToConfig(configuration, "storage.client_cert_file"),
|
|
||||||
Key: GetPathRelativeToConfig(configuration, "storage.client_key_file"),
|
|
||||||
DBName: configuration.GetString("storage.database"),
|
|
||||||
Username: configuration.GetString("storage.username"),
|
|
||||||
Password: configuration.GetString("storage.password"),
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case store.Backend != notary.RethinkDBBackend:
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"%s is not a supported RethinkDB backend driver",
|
|
||||||
store.Backend,
|
|
||||||
)
|
|
||||||
case store.Source == "":
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"must provide a non-empty host:port for %s",
|
|
||||||
store.Backend,
|
|
||||||
)
|
|
||||||
case store.CA == "":
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"cowardly refusal to connect to %s without a CA cert",
|
|
||||||
store.Backend,
|
|
||||||
)
|
|
||||||
case store.Cert == "" || store.Key == "":
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"cowardly refusal to connect to %s without a client cert and key",
|
|
||||||
store.Backend,
|
|
||||||
)
|
|
||||||
case store.DBName == "":
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"%s requires a specific database to connect to",
|
|
||||||
store.Backend,
|
|
||||||
)
|
|
||||||
case store.Username == "":
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"%s requires a username to connect to the db",
|
|
||||||
store.Backend,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &store, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseBugsnag tries to parse out a Bugsnag Configuration from a Viper.
|
|
||||||
// If no values are provided, returns a nil pointer.
|
|
||||||
func ParseBugsnag(configuration *viper.Viper) (*bugsnag.Configuration, error) {
|
|
||||||
// can't unmarshal because we can't add tags to the bugsnag.Configuration
|
|
||||||
// struct
|
|
||||||
bugconf := bugsnag.Configuration{
|
|
||||||
APIKey: configuration.GetString("reporting.bugsnag.api_key"),
|
|
||||||
ReleaseStage: configuration.GetString("reporting.bugsnag.release_stage"),
|
|
||||||
Endpoint: configuration.GetString("reporting.bugsnag.endpoint"),
|
|
||||||
}
|
|
||||||
if bugconf.APIKey == "" && bugconf.ReleaseStage == "" && bugconf.Endpoint == "" {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if bugconf.APIKey == "" {
|
|
||||||
return nil, fmt.Errorf("must provide an API key for bugsnag")
|
|
||||||
}
|
|
||||||
return &bugconf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// utilities for setting up/acting on common configurations
|
|
||||||
|
|
||||||
// SetupViper sets up an instance of viper to also look at environment
|
|
||||||
// variables
|
|
||||||
func SetupViper(v *viper.Viper, envPrefix string) {
|
|
||||||
v.SetEnvPrefix(envPrefix)
|
|
||||||
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
|
||||||
v.AutomaticEnv()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetUpBugsnag configures bugsnag and sets up a logrus hook
|
|
||||||
func SetUpBugsnag(config *bugsnag.Configuration) error {
|
|
||||||
if config != nil {
|
|
||||||
bugsnag.Configure(*config)
|
|
||||||
hook, err := bugsnag_hook.NewBugsnagHook()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logrus.AddHook(hook)
|
|
||||||
logrus.Debug("Adding logrus hook for Bugsnag")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseViper tries to parse out a Viper from a configuration file.
|
|
||||||
func ParseViper(v *viper.Viper, configFile string) error {
|
|
||||||
filename := filepath.Base(configFile)
|
|
||||||
ext := filepath.Ext(configFile)
|
|
||||||
configPath := filepath.Dir(configFile)
|
|
||||||
|
|
||||||
v.SetConfigType(strings.TrimPrefix(ext, "."))
|
|
||||||
v.SetConfigName(strings.TrimSuffix(filename, ext))
|
|
||||||
v.AddConfigPath(configPath)
|
|
||||||
|
|
||||||
if err := v.ReadInConfig(); err != nil {
|
|
||||||
return fmt.Errorf("Could not read config at :%s, viper error: %v", configFile, err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AdjustLogLevel increases/decreases the log level, returning an error if the operation is invalid.
|
|
||||||
func AdjustLogLevel(increment bool) error {
|
|
||||||
lvl := logrus.GetLevel()
|
|
||||||
|
|
||||||
// The log level is not expected, in the foreseeable future, to fall
|
|
||||||
// outside the range [Panic, Debug].
|
|
||||||
if increment {
|
|
||||||
if lvl == logrus.DebugLevel {
|
|
||||||
return fmt.Errorf("log level can not be set higher than %s", "Debug")
|
|
||||||
}
|
|
||||||
lvl++
|
|
||||||
} else {
|
|
||||||
if lvl == logrus.PanicLevel {
|
|
||||||
return fmt.Errorf("log level can not be set lower than %s", "Panic")
|
|
||||||
}
|
|
||||||
lvl--
|
|
||||||
}
|
|
||||||
|
|
||||||
logrus.SetLevel(lvl)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetupSignalTrap is a utility to trap supported signals and handle them (currently by adjusting the log level)
|
|
||||||
func SetupSignalTrap(handler func(os.Signal)) chan os.Signal {
|
|
||||||
if len(notary.NotarySupportedSignals) == 0 {
|
|
||||||
return nil
|
|
||||||
|
|
||||||
}
|
|
||||||
c := make(chan os.Signal, 1)
|
|
||||||
signal.Notify(c, notary.NotarySupportedSignals...)
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
handler(<-c)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return c
|
|
||||||
}
|
|
|
@@ -1,29 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LogLevelSignalHandle will increase/decrease the logging level via the signal we get.
|
|
||||||
func LogLevelSignalHandle(sig os.Signal) {
|
|
||||||
switch sig {
|
|
||||||
case syscall.SIGUSR1:
|
|
||||||
if err := AdjustLogLevel(true); err != nil {
|
|
||||||
fmt.Printf("Attempt to increase log level failed, will remain at %s level, error: %s\n", logrus.GetLevel(), err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case syscall.SIGUSR2:
|
|
||||||
if err := AdjustLogLevel(false); err != nil {
|
|
||||||
fmt.Printf("Attempt to decrease log level failed, will remain at %s level, error: %s\n", logrus.GetLevel(), err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Println("Successfully setting log level to", logrus.GetLevel())
|
|
||||||
}
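
A hedged usage sketch tying `SetupSignalTrap` and `LogLevelSignalHandle` together, roughly
how a long-running daemon might wire them up; the import path mirrors the vendored package
being removed here and is an assumption.

```go
package main

import (
	"os/signal"

	"github.com/docker/notary/utils" // assumed path of the package defining the helpers above
)

func main() {
	// On Unix, SIGUSR1 raises the log level and SIGUSR2 lowers it;
	// on Windows the handler is a no-op.
	sigs := utils.SetupSignalTrap(utils.LogLevelSignalHandle)
	if sigs != nil {
		defer signal.Stop(sigs)
	}

	// ... run the long-lived service here ...
	select {}
}
```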
|
|
|
@@ -1,9 +0,0 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import "os"
|
|
||||||
|
|
||||||
// LogLevelSignalHandle will do nothing, because we aren't currently supporting signal handling in windows
|
|
||||||
func LogLevelSignalHandle(sig os.Signal) {
|
|
||||||
}
|
|
|
@@ -1,252 +0,0 @@
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
ctxu "github.com/docker/distribution/context"
|
|
||||||
"github.com/docker/distribution/registry/api/errcode"
|
|
||||||
"github.com/docker/distribution/registry/auth"
|
|
||||||
"github.com/gorilla/mux"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
|
|
||||||
"github.com/docker/notary"
|
|
||||||
"github.com/docker/notary/tuf/signed"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ContextHandler defines an alternate HTTP handler interface which takes in
|
|
||||||
// a context for authorization and returns an HTTP application error.
|
|
||||||
type ContextHandler func(ctx context.Context, w http.ResponseWriter, r *http.Request) error
|
|
||||||
|
|
||||||
// rootHandler is an implementation of an HTTP request handler which handles
|
|
||||||
// authorization and calling out to the defined alternate http handler.
|
|
||||||
type rootHandler struct {
|
|
||||||
handler ContextHandler
|
|
||||||
auth auth.AccessController
|
|
||||||
actions []string
|
|
||||||
context context.Context
|
|
||||||
trust signed.CryptoService
|
|
||||||
}
|
|
||||||
|
|
||||||
// AuthWrapper wraps a Handler with an Auth requirement
|
|
||||||
type AuthWrapper func(ContextHandler, ...string) *rootHandler
|
|
||||||
|
|
||||||
// RootHandlerFactory creates a new rootHandler factory using the given
|
|
||||||
// Context creator and authorizer. The returned factory allows creating
|
|
||||||
// new rootHandlers from the alternate http handler contextHandler and
|
|
||||||
// a scope.
|
|
||||||
func RootHandlerFactory(ctx context.Context, auth auth.AccessController, trust signed.CryptoService) func(ContextHandler, ...string) *rootHandler {
|
|
||||||
return func(handler ContextHandler, actions ...string) *rootHandler {
|
|
||||||
return &rootHandler{
|
|
||||||
handler: handler,
|
|
||||||
auth: auth,
|
|
||||||
actions: actions,
|
|
||||||
context: ctx,
|
|
||||||
trust: trust,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeHTTP serves an HTTP request and implements the http.Handler interface.
|
|
||||||
func (root *rootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
ctx = ctxu.WithRequest(root.context, r)
|
|
||||||
log = ctxu.GetRequestLogger(ctx)
|
|
||||||
vars = mux.Vars(r)
|
|
||||||
)
|
|
||||||
ctx, w = ctxu.WithResponseWriter(ctx, w)
|
|
||||||
ctx = ctxu.WithLogger(ctx, log)
|
|
||||||
ctx = context.WithValue(ctx, notary.CtxKeyCryptoSvc, root.trust)
|
|
||||||
|
|
||||||
defer func(ctx context.Context) {
|
|
||||||
ctxu.GetResponseLogger(ctx).Info("response completed")
|
|
||||||
}(ctx)
|
|
||||||
|
|
||||||
if root.auth != nil {
|
|
||||||
ctx = context.WithValue(ctx, notary.CtxKeyRepo, vars["gun"])
|
|
||||||
if ctx, err = root.doAuth(ctx, vars["gun"], w); err != nil {
|
|
||||||
// errors have already been logged/output to w inside doAuth
|
|
||||||
// just return
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := root.handler(ctx, w, r); err != nil {
|
|
||||||
serveError(log, w, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func serveError(log ctxu.Logger, w http.ResponseWriter, err error) {
|
|
||||||
if httpErr, ok := err.(errcode.Error); ok {
|
|
||||||
// info level logging for non-5XX http errors
|
|
||||||
httpErrCode := httpErr.ErrorCode().Descriptor().HTTPStatusCode
|
|
||||||
if httpErrCode >= http.StatusInternalServerError {
|
|
||||||
// error level logging for 5XX http errors
|
|
||||||
log.Errorf("%s: %s: %v", httpErr.ErrorCode().Error(), httpErr.Message, httpErr.Detail)
|
|
||||||
} else {
|
|
||||||
log.Infof("%s: %s: %v", httpErr.ErrorCode().Error(), httpErr.Message, httpErr.Detail)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
e := errcode.ServeJSON(w, err)
|
|
||||||
if e != nil {
|
|
||||||
log.Error(e)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (root *rootHandler) doAuth(ctx context.Context, gun string, w http.ResponseWriter) (context.Context, error) {
|
|
||||||
var access []auth.Access
|
|
||||||
if gun == "" {
|
|
||||||
access = buildCatalogRecord(root.actions...)
|
|
||||||
} else {
|
|
||||||
access = buildAccessRecords(gun, root.actions...)
|
|
||||||
}
|
|
||||||
|
|
||||||
log := ctxu.GetRequestLogger(ctx)
|
|
||||||
var authCtx context.Context
|
|
||||||
var err error
|
|
||||||
if authCtx, err = root.auth.Authorized(ctx, access...); err != nil {
|
|
||||||
if challenge, ok := err.(auth.Challenge); ok {
|
|
||||||
// Let the challenge write the response.
|
|
||||||
challenge.SetHeaders(w)
|
|
||||||
|
|
||||||
if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(access)); err != nil {
|
|
||||||
log.Errorf("failed to serve challenge response: %s", err.Error())
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return authCtx, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildAccessRecords(repo string, actions ...string) []auth.Access {
|
|
||||||
requiredAccess := make([]auth.Access, 0, len(actions))
|
|
||||||
for _, action := range actions {
|
|
||||||
requiredAccess = append(requiredAccess, auth.Access{
|
|
||||||
Resource: auth.Resource{
|
|
||||||
Type: "repository",
|
|
||||||
Name: repo,
|
|
||||||
},
|
|
||||||
Action: action,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return requiredAccess
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildCatalogRecord returns the only valid format for the catalog
|
|
||||||
// resource. Only admins can get this access level from the token
|
|
||||||
// server.
|
|
||||||
func buildCatalogRecord(actions ...string) []auth.Access {
|
|
||||||
requiredAccess := []auth.Access{{
|
|
||||||
Resource: auth.Resource{
|
|
||||||
Type: "registry",
|
|
||||||
Name: "catalog",
|
|
||||||
},
|
|
||||||
Action: "*",
|
|
||||||
}}
|
|
||||||
|
|
||||||
return requiredAccess
|
|
||||||
}
|
|
||||||
|
|
||||||
// CacheControlConfig is an interface for something that knows how to set cache
|
|
||||||
// control headers
|
|
||||||
type CacheControlConfig interface {
|
|
||||||
// SetHeaders will actually set the cache control headers on a Headers object
|
|
||||||
SetHeaders(headers http.Header)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCacheControlConfig returns CacheControlConfig interface for either setting
|
|
||||||
// cache control or disabling cache control entirely
|
|
||||||
func NewCacheControlConfig(maxAgeInSeconds int, mustRevalidate bool) CacheControlConfig {
|
|
||||||
if maxAgeInSeconds > 0 {
|
|
||||||
return PublicCacheControl{MustReValidate: mustRevalidate, MaxAgeInSeconds: maxAgeInSeconds}
|
|
||||||
}
|
|
||||||
return NoCacheControl{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublicCacheControl is a set of options that we will set to enable cache control
|
|
||||||
type PublicCacheControl struct {
|
|
||||||
MustReValidate bool
|
|
||||||
MaxAgeInSeconds int
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetHeaders sets the public headers with an optional must-revalidate header
|
|
||||||
func (p PublicCacheControl) SetHeaders(headers http.Header) {
|
|
||||||
cacheControlValue := fmt.Sprintf("public, max-age=%v, s-maxage=%v",
|
|
||||||
p.MaxAgeInSeconds, p.MaxAgeInSeconds)
|
|
||||||
|
|
||||||
if p.MustReValidate {
|
|
||||||
cacheControlValue = fmt.Sprintf("%s, must-revalidate", cacheControlValue)
|
|
||||||
}
|
|
||||||
headers.Set("Cache-Control", cacheControlValue)
|
|
||||||
// delete the Pragma directive, because the only valid value in HTTP is
|
|
||||||
// "no-cache"
|
|
||||||
headers.Del("Pragma")
|
|
||||||
if headers.Get("Last-Modified") == "" {
|
|
||||||
SetLastModifiedHeader(headers, time.Time{})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NoCacheControl is an object which represents a directive to cache nothing
|
|
||||||
type NoCacheControl struct{}
|
|
||||||
|
|
||||||
// SetHeaders sets the public headers cache-control headers and pragma to no-cache
|
|
||||||
func (n NoCacheControl) SetHeaders(headers http.Header) {
|
|
||||||
headers.Set("Cache-Control", "max-age=0, no-cache, no-store")
|
|
||||||
headers.Set("Pragma", "no-cache")
|
|
||||||
}
|
|
||||||
|
|
||||||
// cacheControlResponseWriter wraps an existing response writer, and if Write is
|
|
||||||
// called, will try to set the cache control headers if it can
|
|
||||||
type cacheControlResponseWriter struct {
|
|
||||||
http.ResponseWriter
|
|
||||||
config CacheControlConfig
|
|
||||||
statusCode int
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteHeader stores the header before writing it, so we can tell if it's been set
|
|
||||||
// to a non-200 status code
|
|
||||||
func (c *cacheControlResponseWriter) WriteHeader(statusCode int) {
|
|
||||||
c.statusCode = statusCode
|
|
||||||
c.ResponseWriter.WriteHeader(statusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write will set the cache headers if they haven't already been set and if the status
|
|
||||||
// code has either not been set or set to 200
|
|
||||||
func (c *cacheControlResponseWriter) Write(data []byte) (int, error) {
|
|
||||||
if c.statusCode == http.StatusOK || c.statusCode == 0 {
|
|
||||||
headers := c.ResponseWriter.Header()
|
|
||||||
if headers.Get("Cache-Control") == "" {
|
|
||||||
c.config.SetHeaders(headers)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return c.ResponseWriter.Write(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
type cacheControlHandler struct {
|
|
||||||
http.Handler
|
|
||||||
config CacheControlConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
c.Handler.ServeHTTP(&cacheControlResponseWriter{ResponseWriter: w, config: c.config}, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WrapWithCacheHandler wraps another handler in one that can add cache control headers
|
|
||||||
// given a 200 response
|
|
||||||
func WrapWithCacheHandler(ccc CacheControlConfig, handler http.Handler) http.Handler {
|
|
||||||
if ccc != nil {
|
|
||||||
return cacheControlHandler{Handler: handler, config: ccc}
|
|
||||||
}
|
|
||||||
return handler
|
|
||||||
}
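
As a hedged usage sketch (the import path and server wiring are assumptions, not part of
this diff), `WrapWithCacheHandler` would typically be applied to a read-only handler like this:

```go
package main

import (
	"net/http"

	"github.com/docker/notary/utils" // assumed path of the package defining the cache helpers above
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Cache successful responses for 60 seconds and require revalidation;
	// passing nil instead would leave the handler unwrapped.
	ccc := utils.NewCacheControlConfig(60, true)
	http.ListenAndServe(":8080", utils.WrapWithCacheHandler(ccc, mux))
}
```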
|
|
||||||
|
|
||||||
// SetLastModifiedHeader takes a time and uses it to set the LastModified header using
|
|
||||||
// the right date format
|
|
||||||
func SetLastModifiedHeader(headers http.Header, lmt time.Time) {
|
|
||||||
headers.Set("Last-Modified", lmt.Format(time.RFC1123))
|
|
||||||
}
|
|
|
@@ -24,7 +24,7 @@ github.com/prometheus/common 4fdc91a58c9d3696b982e8a680f4997403132d44
|
||||||
github.com/golang/protobuf c3cefd437628a0b7d31b34fe44b3a7a540e98527
|
github.com/golang/protobuf c3cefd437628a0b7d31b34fe44b3a7a540e98527
|
||||||
github.com/spf13/cobra f368244301305f414206f889b1735a54cfc8bde8
|
github.com/spf13/cobra f368244301305f414206f889b1735a54cfc8bde8
|
||||||
github.com/spf13/viper be5ff3e4840cf692388bde7a057595a474ef379e
|
github.com/spf13/viper be5ff3e4840cf692388bde7a057595a474ef379e
|
||||||
golang.org/x/crypto 5bcd134fee4dd1475da17714aac19c0aa0142e2f
|
golang.org/x/crypto 76eec36fa14229c4b25bb894c2d0e591527af429
|
||||||
golang.org/x/net 6a513affb38dc9788b449d59ffed099b8de18fa0
|
golang.org/x/net 6a513affb38dc9788b449d59ffed099b8de18fa0
|
||||||
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
|
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
|
||||||
google.golang.org/grpc 708a7f9f3283aa2d4f6132d287d78683babe55c8 # v1.0.5
|
google.golang.org/grpc 708a7f9f3283aa2d4f6132d287d78683babe55c8 # v1.0.5
|
||||||
|
|
|
@@ -1,373 +0,0 @@
|
||||||
Mozilla Public License Version 2.0
|
|
||||||
==================================
|
|
||||||
|
|
||||||
1. Definitions
|
|
||||||
--------------
|
|
||||||
|
|
||||||
1.1. "Contributor"
|
|
||||||
means each individual or legal entity that creates, contributes to
|
|
||||||
the creation of, or owns Covered Software.
|
|
||||||
|
|
||||||
1.2. "Contributor Version"
|
|
||||||
means the combination of the Contributions of others (if any) used
|
|
||||||
by a Contributor and that particular Contributor's Contribution.
|
|
||||||
|
|
||||||
1.3. "Contribution"
|
|
||||||
means Covered Software of a particular Contributor.
|
|
||||||
|
|
||||||
1.4. "Covered Software"
|
|
||||||
means Source Code Form to which the initial Contributor has attached
|
|
||||||
the notice in Exhibit A, the Executable Form of such Source Code
|
|
||||||
Form, and Modifications of such Source Code Form, in each case
|
|
||||||
including portions thereof.
|
|
||||||
|
|
||||||
1.5. "Incompatible With Secondary Licenses"
|
|
||||||
means
|
|
||||||
|
|
||||||
(a) that the initial Contributor has attached the notice described
|
|
||||||
in Exhibit B to the Covered Software; or
|
|
||||||
|
|
||||||
(b) that the Covered Software was made available under the terms of
|
|
||||||
version 1.1 or earlier of the License, but not also under the
|
|
||||||
terms of a Secondary License.
|
|
||||||
|
|
||||||
1.6. "Executable Form"
|
|
||||||
means any form of the work other than Source Code Form.
|
|
||||||
|
|
||||||
1.7. "Larger Work"
|
|
||||||
means a work that combines Covered Software with other material, in
|
|
||||||
a separate file or files, that is not Covered Software.
|
|
||||||
|
|
||||||
1.8. "License"
|
|
||||||
means this document.
|
|
||||||
|
|
||||||
1.9. "Licensable"
|
|
||||||
means having the right to grant, to the maximum extent possible,
|
|
||||||
whether at the time of the initial grant or subsequently, any and
|
|
||||||
all of the rights conveyed by this License.
|
|
||||||
|
|
||||||
1.10. "Modifications"
|
|
||||||
means any of the following:
|
|
||||||
|
|
||||||
(a) any file in Source Code Form that results from an addition to,
|
|
||||||
deletion from, or modification of the contents of Covered
|
|
||||||
Software; or
|
|
||||||
|
|
||||||
(b) any new file in Source Code Form that contains any Covered
|
|
||||||
Software.
|
|
||||||
|
|
||||||
1.11. "Patent Claims" of a Contributor
|
|
||||||
means any patent claim(s), including without limitation, method,
|
|
||||||
process, and apparatus claims, in any patent Licensable by such
|
|
||||||
Contributor that would be infringed, but for the grant of the
|
|
||||||
License, by the making, using, selling, offering for sale, having
|
|
||||||
made, import, or transfer of either its Contributions or its
|
|
||||||
Contributor Version.
|
|
||||||
|
|
||||||
1.12. "Secondary License"
|
|
||||||
means either the GNU General Public License, Version 2.0, the GNU
|
|
||||||
Lesser General Public License, Version 2.1, the GNU Affero General
|
|
||||||
Public License, Version 3.0, or any later versions of those
|
|
||||||
licenses.
|
|
||||||
|
|
||||||
1.13. "Source Code Form"
|
|
||||||
means the form of the work preferred for making modifications.
|
|
||||||
|
|
||||||
1.14. "You" (or "Your")
|
|
||||||
means an individual or a legal entity exercising rights under this
|
|
||||||
License. For legal entities, "You" includes any entity that
|
|
||||||
controls, is controlled by, or is under common control with You. For
|
|
||||||
purposes of this definition, "control" means (a) the power, direct
|
|
||||||
or indirect, to cause the direction or management of such entity,
|
|
||||||
whether by contract or otherwise, or (b) ownership of more than
|
|
||||||
fifty percent (50%) of the outstanding shares or beneficial
|
|
||||||
ownership of such entity.
|
|
||||||
|
|
||||||
2. License Grants and Conditions
|
|
||||||
--------------------------------
|
|
||||||
|
|
||||||
2.1. Grants
|
|
||||||
|
|
||||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
|
||||||
non-exclusive license:
|
|
||||||
|
|
||||||
(a) under intellectual property rights (other than patent or trademark)
|
|
||||||
Licensable by such Contributor to use, reproduce, make available,
|
|
||||||
modify, display, perform, distribute, and otherwise exploit its
|
|
||||||
Contributions, either on an unmodified basis, with Modifications, or
|
|
||||||
as part of a Larger Work; and
|
|
||||||
|
|
||||||
(b) under Patent Claims of such Contributor to make, use, sell, offer
|
|
||||||
for sale, have made, import, and otherwise transfer either its
|
|
||||||
Contributions or its Contributor Version.
|
|
||||||
|
|
||||||
2.2. Effective Date
|
|
||||||
|
|
||||||
The licenses granted in Section 2.1 with respect to any Contribution
|
|
||||||
become effective for each Contribution on the date the Contributor first
|
|
||||||
distributes such Contribution.
|
|
||||||
|
|
||||||
2.3. Limitations on Grant Scope
|
|
||||||
|
|
||||||
The licenses granted in this Section 2 are the only rights granted under
|
|
||||||
this License. No additional rights or licenses will be implied from the
|
|
||||||
distribution or licensing of Covered Software under this License.
|
|
||||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
|
||||||
Contributor:
|
|
||||||
|
|
||||||
(a) for any code that a Contributor has removed from Covered Software;
|
|
||||||
or
|
|
||||||
|
|
||||||
(b) for infringements caused by: (i) Your and any other third party's
|
|
||||||
modifications of Covered Software, or (ii) the combination of its
|
|
||||||
Contributions with other software (except as part of its Contributor
|
|
||||||
Version); or
|
|
||||||
|
|
||||||
(c) under Patent Claims infringed by Covered Software in the absence of
|
|
||||||
its Contributions.
|
|
||||||
|
|
||||||
This License does not grant any rights in the trademarks, service marks,
|
|
||||||
or logos of any Contributor (except as may be necessary to comply with
|
|
||||||
the notice requirements in Section 3.4).
|
|
||||||
|
|
||||||
2.4. Subsequent Licenses
|
|
||||||
|
|
||||||
No Contributor makes additional grants as a result of Your choice to
|
|
||||||
distribute the Covered Software under a subsequent version of this
|
|
||||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
|
||||||
permitted under the terms of Section 3.3).
|
|
||||||
|
|
||||||
2.5. Representation
|
|
||||||
|
|
||||||
Each Contributor represents that the Contributor believes its
|
|
||||||
Contributions are its original creation(s) or it has sufficient rights
|
|
||||||
to grant the rights to its Contributions conveyed by this License.
|
|
||||||
|
|
||||||
2.6. Fair Use
|
|
||||||
|
|
||||||
This License is not intended to limit any rights You have under
|
|
||||||
applicable copyright doctrines of fair use, fair dealing, or other
|
|
||||||
equivalents.
|
|
||||||
|
|
||||||
2.7. Conditions
|
|
||||||
|
|
||||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
|
|
||||||
in Section 2.1.
|
|
||||||
|
|
||||||
3. Responsibilities
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
3.1. Distribution of Source Form
|
|
||||||
|
|
||||||
All distribution of Covered Software in Source Code Form, including any
|
|
||||||
Modifications that You create or to which You contribute, must be under
|
|
||||||
the terms of this License. You must inform recipients that the Source
|
|
||||||
Code Form of the Covered Software is governed by the terms of this
|
|
||||||
License, and how they can obtain a copy of this License. You may not
|
|
||||||
attempt to alter or restrict the recipients' rights in the Source Code
|
|
||||||
Form.
|
|
||||||
|
|
||||||
3.2. Distribution of Executable Form
|
|
||||||
|
|
||||||
If You distribute Covered Software in Executable Form then:
|
|
||||||
|
|
||||||
(a) such Covered Software must also be made available in Source Code
|
|
||||||
Form, as described in Section 3.1, and You must inform recipients of
|
|
||||||
the Executable Form how they can obtain a copy of such Source Code
|
|
||||||
Form by reasonable means in a timely manner, at a charge no more
|
|
||||||
than the cost of distribution to the recipient; and
|
|
||||||
|
|
||||||
(b) You may distribute such Executable Form under the terms of this
|
|
||||||
License, or sublicense it under different terms, provided that the
|
|
||||||
license for the Executable Form does not attempt to limit or alter
|
|
||||||
the recipients' rights in the Source Code Form under this License.
|
|
||||||
|
|
||||||
3.3. Distribution of a Larger Work
|
|
||||||
|
|
||||||
You may create and distribute a Larger Work under terms of Your choice,
|
|
||||||
provided that You also comply with the requirements of this License for
|
|
||||||
the Covered Software. If the Larger Work is a combination of Covered
|
|
||||||
Software with a work governed by one or more Secondary Licenses, and the
|
|
||||||
Covered Software is not Incompatible With Secondary Licenses, this
|
|
||||||
License permits You to additionally distribute such Covered Software
|
|
||||||
under the terms of such Secondary License(s), so that the recipient of
|
|
||||||
the Larger Work may, at their option, further distribute the Covered
|
|
||||||
Software under the terms of either this License or such Secondary
|
|
||||||
License(s).
|
|
||||||
|
|
||||||
3.4. Notices
|
|
||||||
|
|
||||||
You may not remove or alter the substance of any license notices
|
|
||||||
(including copyright notices, patent notices, disclaimers of warranty,
|
|
||||||
or limitations of liability) contained within the Source Code Form of
|
|
||||||
the Covered Software, except that You may alter any license notices to
|
|
||||||
the extent required to remedy known factual inaccuracies.
|
|
||||||
|
|
||||||
3.5. Application of Additional Terms
|
|
||||||
|
|
||||||
You may choose to offer, and to charge a fee for, warranty, support,
|
|
||||||
indemnity or liability obligations to one or more recipients of Covered
|
|
||||||
Software. However, You may do so only on Your own behalf, and not on
|
|
||||||
behalf of any Contributor. You must make it absolutely clear that any
|
|
||||||
such warranty, support, indemnity, or liability obligation is offered by
|
|
||||||
You alone, and You hereby agree to indemnify every Contributor for any
|
|
||||||
liability incurred by such Contributor as a result of warranty, support,
|
|
||||||
indemnity or liability terms You offer. You may include additional
|
|
||||||
disclaimers of warranty and limitations of liability specific to any
|
|
||||||
jurisdiction.
|
|
||||||
|
|
||||||
4. Inability to Comply Due to Statute or Regulation
|
|
||||||
---------------------------------------------------
|
|
||||||
|
|
||||||
If it is impossible for You to comply with any of the terms of this
|
|
||||||
License with respect to some or all of the Covered Software due to
|
|
||||||
statute, judicial order, or regulation then You must: (a) comply with
|
|
||||||
the terms of this License to the maximum extent possible; and (b)
|
|
||||||
describe the limitations and the code they affect. Such description must
|
|
||||||
be placed in a text file included with all distributions of the Covered
|
|
||||||
Software under this License. Except to the extent prohibited by statute
|
|
||||||
or regulation, such description must be sufficiently detailed for a
|
|
||||||
recipient of ordinary skill to be able to understand it.
|
|
||||||
|
|
||||||
5. Termination
|
|
||||||
--------------
|
|
||||||
|
|
||||||
5.1. The rights granted under this License will terminate automatically
|
|
||||||
if You fail to comply with any of its terms. However, if You become
|
|
||||||
compliant, then the rights granted under this License from a particular
|
|
||||||
Contributor are reinstated (a) provisionally, unless and until such
|
|
||||||
Contributor explicitly and finally terminates Your grants, and (b) on an
|
|
||||||
ongoing basis, if such Contributor fails to notify You of the
|
|
||||||
non-compliance by some reasonable means prior to 60 days after You have
|
|
||||||
come back into compliance. Moreover, Your grants from a particular
|
|
||||||
Contributor are reinstated on an ongoing basis if such Contributor
|
|
||||||
notifies You of the non-compliance by some reasonable means, this is the
|
|
||||||
first time You have received notice of non-compliance with this License
|
|
||||||
from such Contributor, and You become compliant prior to 30 days after
|
|
||||||
Your receipt of the notice.
|
|
||||||
|
|
||||||
5.2. If You initiate litigation against any entity by asserting a patent
|
|
||||||
infringement claim (excluding declaratory judgment actions,
|
|
||||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
|
||||||
directly or indirectly infringes any patent, then the rights granted to
|
|
||||||
You by any and all Contributors for the Covered Software under Section
|
|
||||||
2.1 of this License shall terminate.
|
|
||||||
|
|
||||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
|
|
||||||
end user license agreements (excluding distributors and resellers) which
|
|
||||||
have been validly granted by You or Your distributors under this License
|
|
||||||
prior to termination shall survive termination.
|
|
||||||
|
|
||||||
************************************************************************
|
|
||||||
* *
|
|
||||||
* 6. Disclaimer of Warranty *
|
|
||||||
* ------------------------- *
|
|
||||||
* *
|
|
||||||
* Covered Software is provided under this License on an "as is" *
|
|
||||||
* basis, without warranty of any kind, either expressed, implied, or *
|
|
||||||
* statutory, including, without limitation, warranties that the *
|
|
||||||
* Covered Software is free of defects, merchantable, fit for a *
|
|
||||||
* particular purpose or non-infringing. The entire risk as to the *
|
|
||||||
* quality and performance of the Covered Software is with You. *
|
|
||||||
* Should any Covered Software prove defective in any respect, You *
|
|
||||||
* (not any Contributor) assume the cost of any necessary servicing, *
|
|
||||||
* repair, or correction. This disclaimer of warranty constitutes an *
|
|
||||||
* essential part of this License. No use of any Covered Software is *
|
|
||||||
* authorized under this License except under this disclaimer. *
|
|
||||||
* *
|
|
||||||
************************************************************************
|
|
||||||
|
|
||||||
************************************************************************
|
|
||||||
* *
|
|
||||||
* 7. Limitation of Liability *
|
|
||||||
* -------------------------- *
|
|
||||||
* *
|
|
||||||
* Under no circumstances and under no legal theory, whether tort *
|
|
||||||
* (including negligence), contract, or otherwise, shall any *
|
|
||||||
* Contributor, or anyone who distributes Covered Software as *
|
|
||||||
* permitted above, be liable to You for any direct, indirect, *
|
|
||||||
* special, incidental, or consequential damages of any character *
|
|
||||||
* including, without limitation, damages for lost profits, loss of *
|
|
||||||
* goodwill, work stoppage, computer failure or malfunction, or any *
|
|
||||||
* and all other commercial damages or losses, even if such party *
|
|
||||||
* shall have been informed of the possibility of such damages. This *
|
|
||||||
* limitation of liability shall not apply to liability for death or *
|
|
||||||
* personal injury resulting from such party's negligence to the *
|
|
||||||
* extent applicable law prohibits such limitation. Some *
|
|
||||||
* jurisdictions do not allow the exclusion or limitation of *
|
|
||||||
* incidental or consequential damages, so this exclusion and *
|
|
||||||
* limitation may not apply to You. *
|
|
||||||
* *
|
|
||||||
************************************************************************
|
|
||||||
|
|
||||||
8. Litigation
|
|
||||||
-------------
|
|
||||||
|
|
||||||
Any litigation relating to this License may be brought only in the
|
|
||||||
courts of a jurisdiction where the defendant maintains its principal
|
|
||||||
place of business and such litigation shall be governed by laws of that
|
|
||||||
jurisdiction, without reference to its conflict-of-law provisions.
|
|
||||||
Nothing in this Section shall prevent a party's ability to bring
|
|
||||||
cross-claims or counter-claims.
|
|
||||||
|
|
||||||
9. Miscellaneous
|
|
||||||
----------------
|
|
||||||
|
|
||||||
This License represents the complete agreement concerning the subject
|
|
||||||
matter hereof. If any provision of this License is held to be
|
|
||||||
unenforceable, such provision shall be reformed only to the extent
|
|
||||||
necessary to make it enforceable. Any law or regulation which provides
|
|
||||||
that the language of a contract shall be construed against the drafter
|
|
||||||
shall not be used to construe this License against a Contributor.
|
|
||||||
|
|
||||||
10. Versions of the License
|
|
||||||
---------------------------
|
|
||||||
|
|
||||||
10.1. New Versions
|
|
||||||
|
|
||||||
Mozilla Foundation is the license steward. Except as provided in Section
|
|
||||||
10.3, no one other than the license steward has the right to modify or
|
|
||||||
publish new versions of this License. Each version will be given a
|
|
||||||
distinguishing version number.
|
|
||||||
|
|
||||||
10.2. Effect of New Versions
|
|
||||||
|
|
||||||
You may distribute the Covered Software under the terms of the version
|
|
||||||
of the License under which You originally received the Covered Software,
|
|
||||||
or under the terms of any subsequent version published by the license
|
|
||||||
steward.
|
|
||||||
|
|
||||||
10.3. Modified Versions
|
|
||||||
|
|
||||||
If you create software not governed by this License, and you want to
|
|
||||||
create a new license for such software, you may create and use a
|
|
||||||
modified version of this License if you rename the license and remove
|
|
||||||
any references to the name of the license steward (except to note that
|
|
||||||
such modified license differs from this License).
|
|
||||||
|
|
||||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
|
||||||
Licenses
|
|
||||||
|
|
||||||
If You choose to distribute Source Code Form that is Incompatible With
|
|
||||||
Secondary Licenses under the terms of this version of the License, the
|
|
||||||
notice described in Exhibit B of this License must be attached.
|
|
||||||
|
|
||||||
Exhibit A - Source Code Form License Notice
|
|
||||||
-------------------------------------------
|
|
||||||
|
|
||||||
This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
||||||
file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
If it is not possible or desirable to put the notice in a particular
|
|
||||||
file, then You may include the notice in a location (such as a LICENSE
|
|
||||||
file in a relevant directory) where a recipient would be likely to look
|
|
||||||
for such a notice.
|
|
||||||
|
|
||||||
You may add additional accurate notices of copyright ownership.
|
|
||||||
|
|
||||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
|
||||||
---------------------------------------------------------
|
|
||||||
|
|
||||||
This Source Code Form is "Incompatible With Secondary Licenses", as
|
|
||||||
defined by the Mozilla Public License, v. 2.0.
|
|
|
@ -1,443 +0,0 @@
|
||||||
# Go-MySQL-Driver
|
|
||||||
|
|
||||||
A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
|
|
||||||
|
|
||||||
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
|
|
||||||
|
|
||||||
---------------------------------------
|
|
||||||
* [Features](#features)
|
|
||||||
* [Requirements](#requirements)
|
|
||||||
* [Installation](#installation)
|
|
||||||
* [Usage](#usage)
|
|
||||||
* [DSN (Data Source Name)](#dsn-data-source-name)
|
|
||||||
* [Password](#password)
|
|
||||||
* [Protocol](#protocol)
|
|
||||||
* [Address](#address)
|
|
||||||
* [Parameters](#parameters)
|
|
||||||
* [Examples](#examples)
|
|
||||||
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
|
|
||||||
* [time.Time support](#timetime-support)
|
|
||||||
* [Unicode support](#unicode-support)
|
|
||||||
* [Testing / Development](#testing--development)
|
|
||||||
* [License](#license)
|
|
||||||
|
|
||||||
---------------------------------------
|
|
||||||
|
|
||||||
## Features
|
|
||||||
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
|
|
||||||
* Native Go implementation. No C-bindings, just pure Go
|
|
||||||
* Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
|
|
||||||
* Automatic handling of broken connections
|
|
||||||
* Automatic Connection Pooling *(by database/sql package)*
|
|
||||||
* Supports queries larger than 16MB
|
|
||||||
* Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
|
|
||||||
* Intelligent `LONG DATA` handling in prepared statements
|
|
||||||
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
|
|
||||||
* Optional `time.Time` parsing
|
|
||||||
* Optional placeholder interpolation
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
* Go 1.2 or higher
|
|
||||||
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
|
|
||||||
|
|
||||||
---------------------------------------
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from a shell:
|
|
||||||
```bash
|
|
||||||
$ go get github.com/go-sql-driver/mysql
|
|
||||||
```
|
|
||||||
Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](http://golang.org/pkg/database/sql) API.
|
|
||||||
|
|
||||||
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
|
|
||||||
```go
|
|
||||||
import "database/sql"
|
|
||||||
import _ "github.com/go-sql-driver/mysql"
|
|
||||||
|
|
||||||
db, err := sql.Open("mysql", "user:password@/dbname")
|
|
||||||
```
|
|
||||||
|
|
||||||
[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
|
|
||||||
|
|
||||||
|
|
||||||
### DSN (Data Source Name)
|
|
||||||
|
|
||||||
The Data Source Name has a common format, similar to the one used e.g. by [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (optional parts are marked by square brackets):
|
|
||||||
```
|
|
||||||
[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
|
|
||||||
```
|
|
||||||
|
|
||||||
A DSN in its fullest form:
|
|
||||||
```
|
|
||||||
username:password@protocol(address)/dbname?param=value
|
|
||||||
```
|
|
||||||
|
|
||||||
Except for the database name, all values are optional. So the minimal DSN is:
|
|
||||||
```
|
|
||||||
/dbname
|
|
||||||
```
|
|
||||||
|
|
||||||
If you do not want to preselect a database, leave `dbname` empty:
|
|
||||||
```
|
|
||||||
/
|
|
||||||
```
|
|
||||||
This has the same effect as an empty DSN string:
|
|
||||||
```
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
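
For illustration, a minimal sketch of the struct route (the credentials and address below are placeholders, not values taken from this document):
```go
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Build the DSN from a Config value instead of concatenating strings.
	cfg := mysql.Config{
		User:   "user",
		Passwd: "password",
		Net:    "tcp",
		Addr:   "localhost:3306",
		DBName: "dbname",
	}
	// Equivalent to the hand-written DSN "user:password@tcp(localhost:3306)/dbname".
	db, err := sql.Open("mysql", cfg.FormatDSN())
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```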
|
|
||||||
|
|
||||||
#### Password
|
|
||||||
Passwords can consist of any character. Escaping is **not** necessary.
|
|
||||||
|
|
||||||
#### Protocol
|
|
||||||
See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available.
|
|
||||||
In general you should use a Unix domain socket if available, and TCP otherwise, for best performance.
|
|
||||||
|
|
||||||
#### Address
|
|
||||||
For TCP and UDP networks, addresses have the form `host:port`.
|
|
||||||
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
|
|
||||||
The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
|
|
||||||
|
|
||||||
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
|
|
||||||
|
|
||||||
#### Parameters
|
|
||||||
*Parameters are case-sensitive!*
|
|
||||||
|
|
||||||
Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
|
|
||||||
|
|
||||||
##### `allowAllFiles`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
|
|
||||||
`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
|
|
||||||
[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
|
|
||||||
|
|
||||||
##### `allowCleartextPasswords`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
|
|
||||||
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
|
|
||||||
|
|
||||||
##### `allowNativePasswords`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
`allowNativePasswords=true` allows the usage of the MySQL native password method.
|
|
||||||
|
|
||||||
##### `allowOldPasswords`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
|
|
||||||
|
|
||||||
##### `charset`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: string
|
|
||||||
Valid Values: <name>
|
|
||||||
Default: none
|
|
||||||
```
|
|
||||||
|
|
||||||
Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
|
|
||||||
|
|
||||||
Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
|
|
||||||
Unless you need the fallback behavior, please use `collation` instead.
|
|
||||||
|
|
||||||
##### `collation`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: string
|
|
||||||
Valid Values: <name>
|
|
||||||
Default: utf8_general_ci
|
|
||||||
```
|
|
||||||
|
|
||||||
Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
|
|
||||||
|
|
||||||
A list of valid collations for a server is retrievable with `SHOW COLLATION`.
|
|
||||||
|
|
||||||
##### `clientFoundRows`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
|
|
||||||
`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
|
|
||||||
|
|
||||||
##### `columnsWithAlias`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
|
|
||||||
When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
|
|
||||||
|
|
||||||
```
|
|
||||||
SELECT u.id FROM users as u
|
|
||||||
```
|
|
||||||
|
|
||||||
will return `u.id` instead of just `id` if `columnsWithAlias=true`.
|
|
||||||
|
|
||||||
##### `interpolateParams`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
|
|
||||||
If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips: with `interpolateParams=false`, the driver has to prepare a statement, execute it with the given parameters, and close the statement again.
|
|
||||||
|
|
||||||
*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
|
|
||||||
|
|
||||||
##### `loc`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: string
|
|
||||||
Valid Values: <escaped name>
|
|
||||||
Default: UTC
|
|
||||||
```
|
|
||||||
|
|
||||||
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
|
|
||||||
|
|
||||||
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
|
|
||||||
|
|
||||||
Please keep in mind that parameter values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively, you can manually replace the `/` with `%2F`. For example, `US/Pacific` would be `loc=US%2FPacific`.
|
|
||||||
|
|
||||||
##### `maxAllowedPacket`
|
|
||||||
```
|
|
||||||
Type: decimal number
|
|
||||||
Default: 0
|
|
||||||
```
|
|
||||||
|
|
||||||
Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from the server.
|
|
||||||
|
|
||||||
##### `multiStatements`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
|
|
||||||
Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injection. Only the result of the first query is returned; all other results are silently discarded.
|
|
||||||
|
|
||||||
When `multiStatements` is used, `?` parameters must only be used in the first statement.
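
A minimal sketch of a batch sent with `multiStatements=true` (table and column names are made up for illustration; imports as in the Usage section above):
```go
db, err := sql.Open("mysql", "user:password@/dbname?multiStatements=true")
if err != nil {
	log.Fatal(err)
}
defer db.Close()

// Both statements travel to the server in one query;
// only the first statement's result would be returned.
if _, err := db.Exec("UPDATE t1 SET done = 1; DELETE FROM t2 WHERE done = 1"); err != nil {
	log.Fatal(err)
}
```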
|
|
||||||
|
|
||||||
##### `parseTime`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
|
|
||||||
`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
|
|
||||||
|
|
||||||
|
|
||||||
##### `readTimeout`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: decimal number
|
|
||||||
Default: 0
|
|
||||||
```
|
|
||||||
|
|
||||||
I/O read timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
|
|
||||||
|
|
||||||
##### `strict`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool
|
|
||||||
Valid Values: true, false
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
|
|
||||||
`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.
|
|
||||||
|
|
||||||
A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.
|
|
||||||
|
|
||||||
By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.
|
|
||||||
|
|
||||||
##### `timeout`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: decimal number
|
|
||||||
Default: OS default
|
|
||||||
```
|
|
||||||
|
|
||||||
*Driver* side connection timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
|
|
||||||
|
|
||||||
##### `tls`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: bool / string
|
|
||||||
Valid Values: true, false, skip-verify, <name>
|
|
||||||
Default: false
|
|
||||||
```
|
|
||||||
|
|
||||||
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
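
A sketch of the custom-value route, assuming a CA certificate file whose path is a placeholder (`crypto/tls`, `crypto/x509`, `io/ioutil` and `log` imports are needed in addition to the driver):
```go
rootCertPool := x509.NewCertPool()
pem, err := ioutil.ReadFile("/path/to/ca-cert.pem") // placeholder path
if err != nil {
	log.Fatal(err)
}
if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
	log.Fatal("failed to append CA certificate")
}
// Register the config under a name, then reference it in the DSN with tls=custom.
if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
	log.Fatal(err)
}

db, err := sql.Open("mysql", "user:password@tcp(host:3306)/dbname?tls=custom")
```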
|
|
||||||
|
|
||||||
##### `writeTimeout`
|
|
||||||
|
|
||||||
```
|
|
||||||
Type: decimal number
|
|
||||||
Default: 0
|
|
||||||
```
|
|
||||||
|
|
||||||
I/O write timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
|
|
||||||
|
|
||||||
|
|
||||||
##### System Variables
|
|
||||||
|
|
||||||
Any other parameters are interpreted as system variables:
|
|
||||||
* `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
|
|
||||||
* `<enum_var>=<value>`: `SET <enum_var>=<value>`
|
|
||||||
* `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
* The values for string variables must be quoted with '
|
|
||||||
* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
|
|
||||||
(which implies values of string variables must be wrapped with `%27`)
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
* `autocommit=1`: `SET autocommit=1`
|
|
||||||
* [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
|
|
||||||
* [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
|
|
||||||
|
|
||||||
|
|
||||||
#### Examples
|
|
||||||
```
|
|
||||||
user@unix(/path/to/socket)/dbname
|
|
||||||
```
|
|
||||||
|
|
||||||
```
|
|
||||||
root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
|
|
||||||
```
|
|
||||||
|
|
||||||
```
|
|
||||||
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
|
|
||||||
```
|
|
||||||
|
|
||||||
Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
|
|
||||||
```
|
|
||||||
user:password@/dbname?sql_mode=TRADITIONAL
|
|
||||||
```
|
|
||||||
|
|
||||||
TCP via IPv6:
|
|
||||||
```
|
|
||||||
user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
|
|
||||||
```
|
|
||||||
|
|
||||||
TCP on a remote host, e.g. Amazon RDS:
|
|
||||||
```
|
|
||||||
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
|
|
||||||
```
|
|
||||||
|
|
||||||
Google Cloud SQL on App Engine (First Generation MySQL Server):
|
|
||||||
```
|
|
||||||
user@cloudsql(project-id:instance-name)/dbname
|
|
||||||
```
|
|
||||||
|
|
||||||
Google Cloud SQL on App Engine (Second Generation MySQL Server):
|
|
||||||
```
|
|
||||||
user@cloudsql(project-id:regionname:instance-name)/dbname
|
|
||||||
```
|
|
||||||
|
|
||||||
TCP using default port (3306) on localhost:
|
|
||||||
```
|
|
||||||
user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
|
|
||||||
```
|
|
||||||
|
|
||||||
Use the default protocol (tcp) and host (localhost:3306):
|
|
||||||
```
|
|
||||||
user:password@/dbname
|
|
||||||
```
|
|
||||||
|
|
||||||
No Database preselected:
|
|
||||||
```
|
|
||||||
user:password@/
|
|
||||||
```
|
|
||||||
|
|
||||||
### `LOAD DATA LOCAL INFILE` support
|
|
||||||
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
|
|
||||||
```go
|
|
||||||
import "github.com/go-sql-driver/mysql"
|
|
||||||
```
|
|
||||||
|
|
||||||
Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
|
|
||||||
|
|
||||||
To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need a handler anymore.
|
|
||||||
|
|
||||||
See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
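
A short sketch of the whitelist route (file path and table name are placeholders; assumes an open `db` and the non-blank `mysql` import shown above):
```go
// Whitelist the file first, then reference it in the query.
mysql.RegisterLocalFile("/tmp/data.csv")
if _, err := db.Exec("LOAD DATA LOCAL INFILE '/tmp/data.csv' INTO TABLE mytable"); err != nil {
	log.Fatal(err)
}
```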
|
|
||||||
|
|
||||||
|
|
||||||
### `time.Time` support
|
|
||||||
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
|
|
||||||
|
|
||||||
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
|
|
||||||
|
|
||||||
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
|
|
||||||
|
|
||||||
Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
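
A fragment contrasting the two scan targets (table and column names are placeholders; the `time.Time` case assumes `parseTime=true` in the DSN, and an open `db`):
```go
// With parseTime=true, DATETIME values scan directly into time.Time.
var created time.Time
if err := db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&created); err != nil {
	log.Fatal(err)
}

// NullTime works with and without parseTime and also handles NULL values.
var updated mysql.NullTime
if err := db.QueryRow("SELECT updated_at FROM events WHERE id = ?", 1).Scan(&updated); err != nil {
	log.Fatal(err)
}
if updated.Valid {
	fmt.Println(updated.Time)
}
```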
|
|
||||||
|
|
||||||
|
|
||||||
### Unicode support
|
|
||||||
Since version 1.1 Go-MySQL-Driver uses the collation `utf8_general_ci` by default.
|
|
||||||
|
|
||||||
Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
|
|
||||||
|
|
||||||
Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred for setting a collation / charset other than the default.
|
|
||||||
|
|
||||||
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
|
|
||||||
|
|
||||||
|
|
||||||
## Testing / Development
|
|
||||||
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
|
|
||||||
|
|
||||||
Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
|
|
||||||
If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
|
|
||||||
|
|
||||||
See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
|
|
||||||
|
|
||||||
---------------------------------------
|
|
||||||
|
|
||||||
## License
|
|
||||||
Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
|
|
||||||
|
|
||||||
Mozilla summarizes the license scope as follows:
|
|
||||||
> MPL: The copyleft applies to any files containing MPLed code.
|
|
||||||
|
|
||||||
|
|
||||||
That means:
|
|
||||||
* You can **use** the **unchanged** source code both in private and commercially
|
|
||||||
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
|
|
||||||
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
|
|
||||||
|
|
||||||
Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
|
|
||||||
|
|
||||||
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
|
|
||||||
|
|
||||||
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
|
|
||||||
|
|
|
@ -1,19 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
// +build appengine
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"appengine/cloudsql"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
RegisterDial("cloudsql", cloudsql.Dial)
|
|
||||||
}
|
|
|
@ -1,147 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const defaultBufSize = 4096
|
|
||||||
|
|
||||||
// A buffer which is used for both reading and writing.
|
|
||||||
// This is possible since communication on each connection is synchronous.
|
|
||||||
// In other words, we can't write and read simultaneously on the same connection.
|
|
||||||
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
|
|
||||||
// Also highly optimized for this particular use case.
|
|
||||||
type buffer struct {
|
|
||||||
buf []byte
|
|
||||||
nc net.Conn
|
|
||||||
idx int
|
|
||||||
length int
|
|
||||||
timeout time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBuffer(nc net.Conn) buffer {
|
|
||||||
var b [defaultBufSize]byte
|
|
||||||
return buffer{
|
|
||||||
buf: b[:],
|
|
||||||
nc: nc,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// fill reads into the buffer until at least _need_ bytes are in it
|
|
||||||
func (b *buffer) fill(need int) error {
|
|
||||||
n := b.length
|
|
||||||
|
|
||||||
// move existing data to the beginning
|
|
||||||
if n > 0 && b.idx > 0 {
|
|
||||||
copy(b.buf[0:n], b.buf[b.idx:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// grow buffer if necessary
|
|
||||||
// TODO: let the buffer shrink again at some point
|
|
||||||
// Maybe keep the org buf slice and swap back?
|
|
||||||
if need > len(b.buf) {
|
|
||||||
// Round up to the next multiple of the default size
|
|
||||||
newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
|
|
||||||
copy(newBuf, b.buf)
|
|
||||||
b.buf = newBuf
|
|
||||||
}
|
|
||||||
|
|
||||||
b.idx = 0
|
|
||||||
|
|
||||||
for {
|
|
||||||
if b.timeout > 0 {
|
|
||||||
if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nn, err := b.nc.Read(b.buf[n:])
|
|
||||||
n += nn
|
|
||||||
|
|
||||||
switch err {
|
|
||||||
case nil:
|
|
||||||
if n < need {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
b.length = n
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case io.EOF:
|
|
||||||
if n >= need {
|
|
||||||
b.length = n
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
|
|
||||||
default:
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns next N bytes from buffer.
|
|
||||||
// The returned slice is only guaranteed to be valid until the next read
|
|
||||||
func (b *buffer) readNext(need int) ([]byte, error) {
|
|
||||||
if b.length < need {
|
|
||||||
// refill
|
|
||||||
if err := b.fill(need); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
offset := b.idx
|
|
||||||
b.idx += need
|
|
||||||
b.length -= need
|
|
||||||
return b.buf[offset:b.idx], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns a buffer with the requested size.
|
|
||||||
// If possible, a slice from the existing buffer is returned.
|
|
||||||
// Otherwise a bigger buffer is made.
|
|
||||||
// Only one buffer (total) can be used at a time.
|
|
||||||
func (b *buffer) takeBuffer(length int) []byte {
|
|
||||||
if b.length > 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// test (cheap) general case first
|
|
||||||
if length <= defaultBufSize || length <= cap(b.buf) {
|
|
||||||
return b.buf[:length]
|
|
||||||
}
|
|
||||||
|
|
||||||
if length < maxPacketSize {
|
|
||||||
b.buf = make([]byte, length)
|
|
||||||
return b.buf
|
|
||||||
}
|
|
||||||
return make([]byte, length)
|
|
||||||
}
|
|
||||||
|
|
||||||
// shortcut which can be used if the requested buffer is guaranteed to be
|
|
||||||
// smaller than defaultBufSize
|
|
||||||
// Only one buffer (total) can be used at a time.
|
|
||||||
func (b *buffer) takeSmallBuffer(length int) []byte {
|
|
||||||
if b.length == 0 {
|
|
||||||
return b.buf[:length]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// takeCompleteBuffer returns the complete existing buffer.
|
|
||||||
// This can be used if the necessary buffer size is unknown.
|
|
||||||
// Only one buffer (total) can be used at a time.
|
|
||||||
func (b *buffer) takeCompleteBuffer() []byte {
|
|
||||||
if b.length == 0 {
|
|
||||||
return b.buf
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,250 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
const defaultCollation = "utf8_general_ci"
|
|
||||||
|
|
||||||
// A list of available collations mapped to the internal ID.
|
|
||||||
// To update this map use the following MySQL query:
|
|
||||||
// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
|
|
||||||
var collations = map[string]byte{
|
|
||||||
"big5_chinese_ci": 1,
|
|
||||||
"latin2_czech_cs": 2,
|
|
||||||
"dec8_swedish_ci": 3,
|
|
||||||
"cp850_general_ci": 4,
|
|
||||||
"latin1_german1_ci": 5,
|
|
||||||
"hp8_english_ci": 6,
|
|
||||||
"koi8r_general_ci": 7,
|
|
||||||
"latin1_swedish_ci": 8,
|
|
||||||
"latin2_general_ci": 9,
|
|
||||||
"swe7_swedish_ci": 10,
|
|
||||||
"ascii_general_ci": 11,
|
|
||||||
"ujis_japanese_ci": 12,
|
|
||||||
"sjis_japanese_ci": 13,
|
|
||||||
"cp1251_bulgarian_ci": 14,
|
|
||||||
"latin1_danish_ci": 15,
|
|
||||||
"hebrew_general_ci": 16,
|
|
||||||
"tis620_thai_ci": 18,
|
|
||||||
"euckr_korean_ci": 19,
|
|
||||||
"latin7_estonian_cs": 20,
|
|
||||||
"latin2_hungarian_ci": 21,
|
|
||||||
"koi8u_general_ci": 22,
|
|
||||||
"cp1251_ukrainian_ci": 23,
|
|
||||||
"gb2312_chinese_ci": 24,
|
|
||||||
"greek_general_ci": 25,
|
|
||||||
"cp1250_general_ci": 26,
|
|
||||||
"latin2_croatian_ci": 27,
|
|
||||||
"gbk_chinese_ci": 28,
|
|
||||||
"cp1257_lithuanian_ci": 29,
|
|
||||||
"latin5_turkish_ci": 30,
|
|
||||||
"latin1_german2_ci": 31,
|
|
||||||
"armscii8_general_ci": 32,
|
|
||||||
"utf8_general_ci": 33,
|
|
||||||
"cp1250_czech_cs": 34,
|
|
||||||
"ucs2_general_ci": 35,
|
|
||||||
"cp866_general_ci": 36,
|
|
||||||
"keybcs2_general_ci": 37,
|
|
||||||
"macce_general_ci": 38,
|
|
||||||
"macroman_general_ci": 39,
|
|
||||||
"cp852_general_ci": 40,
|
|
||||||
"latin7_general_ci": 41,
|
|
||||||
"latin7_general_cs": 42,
|
|
||||||
"macce_bin": 43,
|
|
||||||
"cp1250_croatian_ci": 44,
|
|
||||||
"utf8mb4_general_ci": 45,
|
|
||||||
"utf8mb4_bin": 46,
|
|
||||||
"latin1_bin": 47,
|
|
||||||
"latin1_general_ci": 48,
|
|
||||||
"latin1_general_cs": 49,
|
|
||||||
"cp1251_bin": 50,
|
|
||||||
"cp1251_general_ci": 51,
|
|
||||||
"cp1251_general_cs": 52,
|
|
||||||
"macroman_bin": 53,
|
|
||||||
"utf16_general_ci": 54,
|
|
||||||
"utf16_bin": 55,
|
|
||||||
"utf16le_general_ci": 56,
|
|
||||||
"cp1256_general_ci": 57,
|
|
||||||
"cp1257_bin": 58,
|
|
||||||
"cp1257_general_ci": 59,
|
|
||||||
"utf32_general_ci": 60,
|
|
||||||
"utf32_bin": 61,
|
|
||||||
"utf16le_bin": 62,
|
|
||||||
"binary": 63,
|
|
||||||
"armscii8_bin": 64,
|
|
||||||
"ascii_bin": 65,
|
|
||||||
"cp1250_bin": 66,
|
|
||||||
"cp1256_bin": 67,
|
|
||||||
"cp866_bin": 68,
|
|
||||||
"dec8_bin": 69,
|
|
||||||
"greek_bin": 70,
|
|
||||||
"hebrew_bin": 71,
|
|
||||||
"hp8_bin": 72,
|
|
||||||
"keybcs2_bin": 73,
|
|
||||||
"koi8r_bin": 74,
|
|
||||||
"koi8u_bin": 75,
|
|
||||||
"latin2_bin": 77,
|
|
||||||
"latin5_bin": 78,
|
|
||||||
"latin7_bin": 79,
|
|
||||||
"cp850_bin": 80,
|
|
||||||
"cp852_bin": 81,
|
|
||||||
"swe7_bin": 82,
|
|
||||||
"utf8_bin": 83,
|
|
||||||
"big5_bin": 84,
|
|
||||||
"euckr_bin": 85,
|
|
||||||
"gb2312_bin": 86,
|
|
||||||
"gbk_bin": 87,
|
|
||||||
"sjis_bin": 88,
|
|
||||||
"tis620_bin": 89,
|
|
||||||
"ucs2_bin": 90,
|
|
||||||
"ujis_bin": 91,
|
|
||||||
"geostd8_general_ci": 92,
|
|
||||||
"geostd8_bin": 93,
|
|
||||||
"latin1_spanish_ci": 94,
|
|
||||||
"cp932_japanese_ci": 95,
|
|
||||||
"cp932_bin": 96,
|
|
||||||
"eucjpms_japanese_ci": 97,
|
|
||||||
"eucjpms_bin": 98,
|
|
||||||
"cp1250_polish_ci": 99,
|
|
||||||
"utf16_unicode_ci": 101,
|
|
||||||
"utf16_icelandic_ci": 102,
|
|
||||||
"utf16_latvian_ci": 103,
|
|
||||||
"utf16_romanian_ci": 104,
|
|
||||||
"utf16_slovenian_ci": 105,
|
|
||||||
"utf16_polish_ci": 106,
|
|
||||||
"utf16_estonian_ci": 107,
|
|
||||||
"utf16_spanish_ci": 108,
|
|
||||||
"utf16_swedish_ci": 109,
|
|
||||||
"utf16_turkish_ci": 110,
|
|
||||||
"utf16_czech_ci": 111,
|
|
||||||
"utf16_danish_ci": 112,
|
|
||||||
"utf16_lithuanian_ci": 113,
|
|
||||||
"utf16_slovak_ci": 114,
|
|
||||||
"utf16_spanish2_ci": 115,
|
|
||||||
"utf16_roman_ci": 116,
|
|
||||||
"utf16_persian_ci": 117,
|
|
||||||
"utf16_esperanto_ci": 118,
|
|
||||||
"utf16_hungarian_ci": 119,
|
|
||||||
"utf16_sinhala_ci": 120,
|
|
||||||
"utf16_german2_ci": 121,
|
|
||||||
"utf16_croatian_ci": 122,
|
|
||||||
"utf16_unicode_520_ci": 123,
|
|
||||||
"utf16_vietnamese_ci": 124,
|
|
||||||
"ucs2_unicode_ci": 128,
|
|
||||||
"ucs2_icelandic_ci": 129,
|
|
||||||
"ucs2_latvian_ci": 130,
|
|
||||||
"ucs2_romanian_ci": 131,
|
|
||||||
"ucs2_slovenian_ci": 132,
|
|
||||||
"ucs2_polish_ci": 133,
|
|
||||||
"ucs2_estonian_ci": 134,
|
|
||||||
"ucs2_spanish_ci": 135,
|
|
||||||
"ucs2_swedish_ci": 136,
|
|
||||||
"ucs2_turkish_ci": 137,
|
|
||||||
"ucs2_czech_ci": 138,
|
|
||||||
"ucs2_danish_ci": 139,
|
|
||||||
"ucs2_lithuanian_ci": 140,
|
|
||||||
"ucs2_slovak_ci": 141,
|
|
||||||
"ucs2_spanish2_ci": 142,
|
|
||||||
"ucs2_roman_ci": 143,
|
|
||||||
"ucs2_persian_ci": 144,
|
|
||||||
"ucs2_esperanto_ci": 145,
|
|
||||||
"ucs2_hungarian_ci": 146,
|
|
||||||
"ucs2_sinhala_ci": 147,
|
|
||||||
"ucs2_german2_ci": 148,
|
|
||||||
"ucs2_croatian_ci": 149,
|
|
||||||
"ucs2_unicode_520_ci": 150,
|
|
||||||
"ucs2_vietnamese_ci": 151,
|
|
||||||
"ucs2_general_mysql500_ci": 159,
|
|
||||||
"utf32_unicode_ci": 160,
|
|
||||||
"utf32_icelandic_ci": 161,
|
|
||||||
"utf32_latvian_ci": 162,
|
|
||||||
"utf32_romanian_ci": 163,
|
|
||||||
"utf32_slovenian_ci": 164,
|
|
||||||
"utf32_polish_ci": 165,
|
|
||||||
"utf32_estonian_ci": 166,
|
|
||||||
"utf32_spanish_ci": 167,
|
|
||||||
"utf32_swedish_ci": 168,
|
|
||||||
"utf32_turkish_ci": 169,
|
|
||||||
"utf32_czech_ci": 170,
|
|
||||||
"utf32_danish_ci": 171,
|
|
||||||
"utf32_lithuanian_ci": 172,
|
|
||||||
"utf32_slovak_ci": 173,
|
|
||||||
"utf32_spanish2_ci": 174,
|
|
||||||
"utf32_roman_ci": 175,
|
|
||||||
"utf32_persian_ci": 176,
|
|
||||||
"utf32_esperanto_ci": 177,
|
|
||||||
"utf32_hungarian_ci": 178,
|
|
||||||
"utf32_sinhala_ci": 179,
|
|
||||||
"utf32_german2_ci": 180,
|
|
||||||
"utf32_croatian_ci": 181,
|
|
||||||
"utf32_unicode_520_ci": 182,
|
|
||||||
"utf32_vietnamese_ci": 183,
|
|
||||||
"utf8_unicode_ci": 192,
|
|
||||||
"utf8_icelandic_ci": 193,
|
|
||||||
"utf8_latvian_ci": 194,
|
|
||||||
"utf8_romanian_ci": 195,
|
|
||||||
"utf8_slovenian_ci": 196,
|
|
||||||
"utf8_polish_ci": 197,
|
|
||||||
"utf8_estonian_ci": 198,
|
|
||||||
"utf8_spanish_ci": 199,
|
|
||||||
"utf8_swedish_ci": 200,
|
|
||||||
"utf8_turkish_ci": 201,
|
|
||||||
"utf8_czech_ci": 202,
|
|
||||||
"utf8_danish_ci": 203,
|
|
||||||
"utf8_lithuanian_ci": 204,
|
|
||||||
"utf8_slovak_ci": 205,
|
|
||||||
"utf8_spanish2_ci": 206,
|
|
||||||
"utf8_roman_ci": 207,
|
|
||||||
"utf8_persian_ci": 208,
|
|
||||||
"utf8_esperanto_ci": 209,
|
|
||||||
"utf8_hungarian_ci": 210,
|
|
||||||
"utf8_sinhala_ci": 211,
|
|
||||||
"utf8_german2_ci": 212,
|
|
||||||
"utf8_croatian_ci": 213,
|
|
||||||
"utf8_unicode_520_ci": 214,
|
|
||||||
"utf8_vietnamese_ci": 215,
|
|
||||||
"utf8_general_mysql500_ci": 223,
|
|
||||||
"utf8mb4_unicode_ci": 224,
|
|
||||||
"utf8mb4_icelandic_ci": 225,
|
|
||||||
"utf8mb4_latvian_ci": 226,
|
|
||||||
"utf8mb4_romanian_ci": 227,
|
|
||||||
"utf8mb4_slovenian_ci": 228,
|
|
||||||
"utf8mb4_polish_ci": 229,
|
|
||||||
"utf8mb4_estonian_ci": 230,
|
|
||||||
"utf8mb4_spanish_ci": 231,
|
|
||||||
"utf8mb4_swedish_ci": 232,
|
|
||||||
"utf8mb4_turkish_ci": 233,
|
|
||||||
"utf8mb4_czech_ci": 234,
|
|
||||||
"utf8mb4_danish_ci": 235,
|
|
||||||
"utf8mb4_lithuanian_ci": 236,
|
|
||||||
"utf8mb4_slovak_ci": 237,
|
|
||||||
"utf8mb4_spanish2_ci": 238,
|
|
||||||
"utf8mb4_roman_ci": 239,
|
|
||||||
"utf8mb4_persian_ci": 240,
|
|
||||||
"utf8mb4_esperanto_ci": 241,
|
|
||||||
"utf8mb4_hungarian_ci": 242,
|
|
||||||
"utf8mb4_sinhala_ci": 243,
|
|
||||||
"utf8mb4_german2_ci": 244,
|
|
||||||
"utf8mb4_croatian_ci": 245,
|
|
||||||
"utf8mb4_unicode_520_ci": 246,
|
|
||||||
"utf8mb4_vietnamese_ci": 247,
|
|
||||||
}
|
|
||||||
|
|
||||||
// A blacklist of collations for which it is unsafe to interpolate parameters.
|
|
||||||
// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
|
|
||||||
var unsafeCollations = map[string]bool{
|
|
||||||
"big5_chinese_ci": true,
|
|
||||||
"sjis_japanese_ci": true,
|
|
||||||
"gbk_chinese_ci": true,
|
|
||||||
"big5_bin": true,
|
|
||||||
"gb2312_bin": true,
|
|
||||||
"gbk_bin": true,
|
|
||||||
"sjis_bin": true,
|
|
||||||
"cp932_japanese_ci": true,
|
|
||||||
"cp932_bin": true,
|
|
||||||
}
|
|
|
@ -1,377 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql/driver"
|
|
||||||
"net"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type mysqlConn struct {
|
|
||||||
buf buffer
|
|
||||||
netConn net.Conn
|
|
||||||
affectedRows uint64
|
|
||||||
insertId uint64
|
|
||||||
cfg *Config
|
|
||||||
maxAllowedPacket int
|
|
||||||
maxWriteSize int
|
|
||||||
writeTimeout time.Duration
|
|
||||||
flags clientFlag
|
|
||||||
status statusFlag
|
|
||||||
sequence uint8
|
|
||||||
parseTime bool
|
|
||||||
strict bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handles parameters set in DSN after the connection is established
|
|
||||||
func (mc *mysqlConn) handleParams() (err error) {
|
|
||||||
for param, val := range mc.cfg.Params {
|
|
||||||
switch param {
|
|
||||||
// Charset
|
|
||||||
case "charset":
|
|
||||||
charsets := strings.Split(val, ",")
|
|
||||||
for i := range charsets {
|
|
||||||
// ignore errors here - a charset may not exist
|
|
||||||
err = mc.exec("SET NAMES " + charsets[i])
|
|
||||||
if err == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// System Vars
|
|
||||||
default:
|
|
||||||
err = mc.exec("SET " + param + "=" + val + "")
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mc *mysqlConn) Begin() (driver.Tx, error) {
|
|
||||||
if mc.netConn == nil {
|
|
||||||
errLog.Print(ErrInvalidConn)
|
|
||||||
return nil, driver.ErrBadConn
|
|
||||||
}
|
|
||||||
err := mc.exec("START TRANSACTION")
|
|
||||||
if err == nil {
|
|
||||||
return &mysqlTx{mc}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mc *mysqlConn) Close() (err error) {
|
|
||||||
// Makes Close idempotent
|
|
||||||
if mc.netConn != nil {
|
|
||||||
err = mc.writeCommandPacket(comQuit)
|
|
||||||
}
|
|
||||||
|
|
||||||
mc.cleanup()
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Closes the network connection and unsets internal variables. Do not call this
|
|
||||||
// function after successful authentication, call Close instead. This function
|
|
||||||
// is called before auth or on auth failure because MySQL will have already
|
|
||||||
// closed the network connection.
|
|
||||||
func (mc *mysqlConn) cleanup() {
|
|
||||||
// Makes cleanup idempotent
|
|
||||||
if mc.netConn != nil {
|
|
||||||
if err := mc.netConn.Close(); err != nil {
|
|
||||||
errLog.Print(err)
|
|
||||||
}
|
|
||||||
mc.netConn = nil
|
|
||||||
}
|
|
||||||
mc.cfg = nil
|
|
||||||
mc.buf.nc = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
|
|
||||||
if mc.netConn == nil {
|
|
||||||
errLog.Print(ErrInvalidConn)
|
|
||||||
return nil, driver.ErrBadConn
|
|
||||||
}
|
|
||||||
// Send command
|
|
||||||
err := mc.writeCommandPacketStr(comStmtPrepare, query)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
stmt := &mysqlStmt{
|
|
||||||
mc: mc,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read Result
|
|
||||||
columnCount, err := stmt.readPrepareResultPacket()
|
|
||||||
if err == nil {
|
|
||||||
if stmt.paramCount > 0 {
|
|
||||||
if err = mc.readUntilEOF(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if columnCount > 0 {
|
|
||||||
err = mc.readUntilEOF()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stmt, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
|
|
||||||
// The number of ? placeholders should equal len(args)
|
|
||||||
if strings.Count(query, "?") != len(args) {
|
|
||||||
return "", driver.ErrSkip
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := mc.buf.takeCompleteBuffer()
|
|
||||||
if buf == nil {
|
|
||||||
// cannot take the buffer; something must be wrong with the connection
|
|
||||||
errLog.Print(ErrBusyBuffer)
|
|
||||||
return "", driver.ErrBadConn
|
|
||||||
}
|
|
||||||
buf = buf[:0]
|
|
||||||
argPos := 0
|
|
||||||
|
|
||||||
for i := 0; i < len(query); i++ {
|
|
||||||
q := strings.IndexByte(query[i:], '?')
|
|
||||||
if q == -1 {
|
|
||||||
buf = append(buf, query[i:]...)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
buf = append(buf, query[i:i+q]...)
|
|
||||||
i += q
|
|
||||||
|
|
||||||
arg := args[argPos]
|
|
||||||
argPos++
|
|
||||||
|
|
||||||
if arg == nil {
|
|
||||||
buf = append(buf, "NULL"...)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v := arg.(type) {
|
|
||||||
case int64:
|
|
||||||
buf = strconv.AppendInt(buf, v, 10)
|
|
||||||
case float64:
|
|
||||||
buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
|
|
||||||
case bool:
|
|
||||||
if v {
|
|
||||||
buf = append(buf, '1')
|
|
||||||
} else {
|
|
||||||
buf = append(buf, '0')
|
|
||||||
}
|
|
||||||
case time.Time:
|
|
||||||
if v.IsZero() {
|
|
||||||
buf = append(buf, "'0000-00-00'"...)
|
|
||||||
} else {
|
|
||||||
v := v.In(mc.cfg.Loc)
|
|
||||||
v = v.Add(time.Nanosecond * 500) // To round under microsecond
|
|
||||||
year := v.Year()
|
|
||||||
year100 := year / 100
|
|
||||||
year1 := year % 100
|
|
||||||
month := v.Month()
|
|
||||||
day := v.Day()
|
|
||||||
hour := v.Hour()
|
|
||||||
minute := v.Minute()
|
|
||||||
second := v.Second()
|
|
||||||
micro := v.Nanosecond() / 1000
|
|
||||||
|
|
||||||
buf = append(buf, []byte{
|
|
||||||
'\'',
|
|
||||||
digits10[year100], digits01[year100],
|
|
||||||
digits10[year1], digits01[year1],
|
|
||||||
'-',
|
|
||||||
digits10[month], digits01[month],
|
|
||||||
'-',
|
|
||||||
digits10[day], digits01[day],
|
|
||||||
' ',
|
|
||||||
digits10[hour], digits01[hour],
|
|
||||||
':',
|
|
||||||
digits10[minute], digits01[minute],
|
|
||||||
':',
|
|
||||||
digits10[second], digits01[second],
|
|
||||||
}...)
|
|
||||||
|
|
||||||
if micro != 0 {
|
|
||||||
micro10000 := micro / 10000
|
|
||||||
micro100 := micro / 100 % 100
|
|
||||||
micro1 := micro % 100
|
|
||||||
buf = append(buf, []byte{
|
|
||||||
'.',
|
|
||||||
digits10[micro10000], digits01[micro10000],
|
|
||||||
digits10[micro100], digits01[micro100],
|
|
||||||
digits10[micro1], digits01[micro1],
|
|
||||||
}...)
|
|
||||||
}
|
|
||||||
buf = append(buf, '\'')
|
|
||||||
}
|
|
||||||
case []byte:
|
|
||||||
if v == nil {
|
|
||||||
buf = append(buf, "NULL"...)
|
|
||||||
} else {
|
|
||||||
buf = append(buf, "_binary'"...)
|
|
||||||
if mc.status&statusNoBackslashEscapes == 0 {
|
|
||||||
buf = escapeBytesBackslash(buf, v)
|
|
||||||
} else {
|
|
||||||
buf = escapeBytesQuotes(buf, v)
|
|
||||||
}
|
|
||||||
buf = append(buf, '\'')
|
|
||||||
}
|
|
||||||
case string:
|
|
||||||
buf = append(buf, '\'')
|
|
||||||
if mc.status&statusNoBackslashEscapes == 0 {
|
|
||||||
buf = escapeStringBackslash(buf, v)
|
|
||||||
} else {
|
|
||||||
buf = escapeStringQuotes(buf, v)
|
|
||||||
}
|
|
||||||
buf = append(buf, '\'')
|
|
||||||
default:
|
|
||||||
return "", driver.ErrSkip
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(buf)+4 > mc.maxAllowedPacket {
|
|
||||||
return "", driver.ErrSkip
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if argPos != len(args) {
|
|
||||||
return "", driver.ErrSkip
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
|
|
||||||
if mc.netConn == nil {
|
|
||||||
errLog.Print(ErrInvalidConn)
|
|
||||||
return nil, driver.ErrBadConn
|
|
||||||
}
|
|
||||||
if len(args) != 0 {
|
|
||||||
if !mc.cfg.InterpolateParams {
|
|
||||||
return nil, driver.ErrSkip
|
|
||||||
}
|
|
||||||
// try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
|
|
||||||
prepared, err := mc.interpolateParams(query, args)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
query = prepared
|
|
||||||
args = nil
|
|
||||||
}
|
|
||||||
mc.affectedRows = 0
|
|
||||||
mc.insertId = 0
|
|
||||||
|
|
||||||
err := mc.exec(query)
|
|
||||||
if err == nil {
|
|
||||||
return &mysqlResult{
|
|
||||||
affectedRows: int64(mc.affectedRows),
|
|
||||||
insertId: int64(mc.insertId),
|
|
||||||
}, err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal function to execute commands
|
|
||||||
func (mc *mysqlConn) exec(query string) error {
|
|
||||||
// Send command
|
|
||||||
err := mc.writeCommandPacketStr(comQuery, query)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read Result
|
|
||||||
resLen, err := mc.readResultSetHeaderPacket()
|
|
||||||
if err == nil && resLen > 0 {
|
|
||||||
if err = mc.readUntilEOF(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = mc.readUntilEOF()
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
|
|
||||||
if mc.netConn == nil {
|
|
||||||
errLog.Print(ErrInvalidConn)
|
|
||||||
return nil, driver.ErrBadConn
|
|
||||||
}
|
|
||||||
if len(args) != 0 {
|
|
||||||
if !mc.cfg.InterpolateParams {
|
|
||||||
return nil, driver.ErrSkip
|
|
||||||
}
|
|
||||||
// try client-side prepare to reduce roundtrip
|
|
||||||
prepared, err := mc.interpolateParams(query, args)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
query = prepared
|
|
||||||
args = nil
|
|
||||||
}
|
|
||||||
// Send command
|
|
||||||
err := mc.writeCommandPacketStr(comQuery, query)
|
|
||||||
if err == nil {
|
|
||||||
// Read Result
|
|
||||||
var resLen int
|
|
||||||
resLen, err = mc.readResultSetHeaderPacket()
|
|
||||||
if err == nil {
|
|
||||||
rows := new(textRows)
|
|
||||||
rows.mc = mc
|
|
||||||
|
|
||||||
if resLen == 0 {
|
|
||||||
// no columns, no more data
|
|
||||||
return emptyRows{}, nil
|
|
||||||
}
|
|
||||||
// Columns
|
|
||||||
rows.columns, err = mc.readColumns(resLen)
|
|
||||||
return rows, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets the value of the given MySQL System Variable
|
|
||||||
// The returned byte slice is only valid until the next read
|
|
||||||
func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
|
|
||||||
// Send command
|
|
||||||
if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read Result
|
|
||||||
resLen, err := mc.readResultSetHeaderPacket()
|
|
||||||
if err == nil {
|
|
||||||
rows := new(textRows)
|
|
||||||
rows.mc = mc
|
|
||||||
rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
|
|
||||||
|
|
||||||
if resLen > 0 {
|
|
||||||
// Columns
|
|
||||||
if err := mc.readUntilEOF(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dest := make([]driver.Value, resLen)
|
|
||||||
if err = rows.readRow(dest); err == nil {
|
|
||||||
return dest[0].([]byte), mc.readUntilEOF()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
|
@ -1,163 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
const (
|
|
||||||
minProtocolVersion byte = 10
|
|
||||||
maxPacketSize = 1<<24 - 1
|
|
||||||
timeFormat = "2006-01-02 15:04:05.999999"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MySQL constants documentation:
|
|
||||||
// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
|
|
||||||
|
|
||||||
const (
|
|
||||||
iOK byte = 0x00
|
|
||||||
iLocalInFile byte = 0xfb
|
|
||||||
iEOF byte = 0xfe
|
|
||||||
iERR byte = 0xff
|
|
||||||
)
|
|
||||||
|
|
||||||
// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
|
|
||||||
type clientFlag uint32
|
|
||||||
|
|
||||||
const (
|
|
||||||
clientLongPassword clientFlag = 1 << iota
|
|
||||||
clientFoundRows
|
|
||||||
clientLongFlag
|
|
||||||
clientConnectWithDB
|
|
||||||
clientNoSchema
|
|
||||||
clientCompress
|
|
||||||
clientODBC
|
|
||||||
clientLocalFiles
|
|
||||||
clientIgnoreSpace
|
|
||||||
clientProtocol41
|
|
||||||
clientInteractive
|
|
||||||
clientSSL
|
|
||||||
clientIgnoreSIGPIPE
|
|
||||||
clientTransactions
|
|
||||||
clientReserved
|
|
||||||
clientSecureConn
|
|
||||||
clientMultiStatements
|
|
||||||
clientMultiResults
|
|
||||||
clientPSMultiResults
|
|
||||||
clientPluginAuth
|
|
||||||
clientConnectAttrs
|
|
||||||
clientPluginAuthLenEncClientData
|
|
||||||
clientCanHandleExpiredPasswords
|
|
||||||
clientSessionTrack
|
|
||||||
clientDeprecateEOF
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
comQuit byte = iota + 1
|
|
||||||
comInitDB
|
|
||||||
comQuery
|
|
||||||
comFieldList
|
|
||||||
comCreateDB
|
|
||||||
comDropDB
|
|
||||||
comRefresh
|
|
||||||
comShutdown
|
|
||||||
comStatistics
|
|
||||||
comProcessInfo
|
|
||||||
comConnect
|
|
||||||
comProcessKill
|
|
||||||
comDebug
|
|
||||||
comPing
|
|
||||||
comTime
|
|
||||||
comDelayedInsert
|
|
||||||
comChangeUser
|
|
||||||
comBinlogDump
|
|
||||||
comTableDump
|
|
||||||
comConnectOut
|
|
||||||
comRegisterSlave
|
|
||||||
comStmtPrepare
|
|
||||||
comStmtExecute
|
|
||||||
comStmtSendLongData
|
|
||||||
comStmtClose
|
|
||||||
comStmtReset
|
|
||||||
comSetOption
|
|
||||||
comStmtFetch
|
|
||||||
)
|
|
||||||
|
|
||||||
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
|
|
||||||
const (
|
|
||||||
fieldTypeDecimal byte = iota
|
|
||||||
fieldTypeTiny
|
|
||||||
fieldTypeShort
|
|
||||||
fieldTypeLong
|
|
||||||
fieldTypeFloat
|
|
||||||
fieldTypeDouble
|
|
||||||
fieldTypeNULL
|
|
||||||
fieldTypeTimestamp
|
|
||||||
fieldTypeLongLong
|
|
||||||
fieldTypeInt24
|
|
||||||
fieldTypeDate
|
|
||||||
fieldTypeTime
|
|
||||||
fieldTypeDateTime
|
|
||||||
fieldTypeYear
|
|
||||||
fieldTypeNewDate
|
|
||||||
fieldTypeVarChar
|
|
||||||
fieldTypeBit
|
|
||||||
)
|
|
||||||
const (
|
|
||||||
fieldTypeJSON byte = iota + 0xf5
|
|
||||||
fieldTypeNewDecimal
|
|
||||||
fieldTypeEnum
|
|
||||||
fieldTypeSet
|
|
||||||
fieldTypeTinyBLOB
|
|
||||||
fieldTypeMediumBLOB
|
|
||||||
fieldTypeLongBLOB
|
|
||||||
fieldTypeBLOB
|
|
||||||
fieldTypeVarString
|
|
||||||
fieldTypeString
|
|
||||||
fieldTypeGeometry
|
|
||||||
)
|
|
||||||
|
|
||||||
type fieldFlag uint16
|
|
||||||
|
|
||||||
const (
|
|
||||||
flagNotNULL fieldFlag = 1 << iota
|
|
||||||
flagPriKey
|
|
||||||
flagUniqueKey
|
|
||||||
flagMultipleKey
|
|
||||||
flagBLOB
|
|
||||||
flagUnsigned
|
|
||||||
flagZeroFill
|
|
||||||
flagBinary
|
|
||||||
flagEnum
|
|
||||||
flagAutoIncrement
|
|
||||||
flagTimestamp
|
|
||||||
flagSet
|
|
||||||
flagUnknown1
|
|
||||||
flagUnknown2
|
|
||||||
flagUnknown3
|
|
||||||
flagUnknown4
|
|
||||||
)
|
|
||||||
|
|
||||||
// http://dev.mysql.com/doc/internals/en/status-flags.html
|
|
||||||
type statusFlag uint16
|
|
||||||
|
|
||||||
const (
|
|
||||||
statusInTrans statusFlag = 1 << iota
|
|
||||||
statusInAutocommit
|
|
||||||
statusReserved // Not in documentation
|
|
||||||
statusMoreResultsExists
|
|
||||||
statusNoGoodIndexUsed
|
|
||||||
statusNoIndexUsed
|
|
||||||
statusCursorExists
|
|
||||||
statusLastRowSent
|
|
||||||
statusDbDropped
|
|
||||||
statusNoBackslashEscapes
|
|
||||||
statusMetadataChanged
|
|
||||||
statusQueryWasSlow
|
|
||||||
statusPsOutParams
|
|
||||||
statusInTransReadonly
|
|
||||||
statusSessionStateChanged
|
|
||||||
)
|
|
|
@ -1,183 +0,0 @@
|
||||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
// Package mysql provides a MySQL driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// The driver should be used via the database/sql package:
|
|
||||||
//
|
|
||||||
// import "database/sql"
|
|
||||||
// import _ "github.com/go-sql-driver/mysql"
|
|
||||||
//
|
|
||||||
// db, err := sql.Open("mysql", "user:password@/dbname")
|
|
||||||
//
|
|
||||||
// See https://github.com/go-sql-driver/mysql#usage for details
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"database/sql/driver"
|
|
||||||
"net"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MySQLDriver is exported to make the driver directly accessible.
|
|
||||||
// In general the driver is used via the database/sql package.
|
|
||||||
type MySQLDriver struct{}
|
|
||||||
|
|
||||||
// DialFunc is a function which can be used to establish the network connection.
|
|
||||||
// Custom dial functions must be registered with RegisterDial
|
|
||||||
type DialFunc func(addr string) (net.Conn, error)
|
|
||||||
|
|
||||||
var dials map[string]DialFunc
|
|
||||||
|
|
||||||
// RegisterDial registers a custom dial function. It can then be used by the
|
|
||||||
// network address mynet(addr), where mynet is the registered new network.
|
|
||||||
// addr is passed as a parameter to the dial function.
|
|
||||||
func RegisterDial(net string, dial DialFunc) {
|
|
||||||
if dials == nil {
|
|
||||||
dials = make(map[string]DialFunc)
|
|
||||||
}
|
|
||||||
dials[net] = dial
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open new Connection.
|
|
||||||
// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
|
|
||||||
// the DSN string is formatted
|
|
||||||
func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
|
|
||||||
var err error
|
|
||||||
|
|
||||||
// New mysqlConn
|
|
||||||
mc := &mysqlConn{
|
|
||||||
maxAllowedPacket: maxPacketSize,
|
|
||||||
maxWriteSize: maxPacketSize - 1,
|
|
||||||
}
|
|
||||||
mc.cfg, err = ParseDSN(dsn)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
mc.parseTime = mc.cfg.ParseTime
|
|
||||||
mc.strict = mc.cfg.Strict
|
|
||||||
|
|
||||||
// Connect to Server
|
|
||||||
if dial, ok := dials[mc.cfg.Net]; ok {
|
|
||||||
mc.netConn, err = dial(mc.cfg.Addr)
|
|
||||||
} else {
|
|
||||||
nd := net.Dialer{Timeout: mc.cfg.Timeout}
|
|
||||||
mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enable TCP Keepalives on TCP connections
|
|
||||||
if tc, ok := mc.netConn.(*net.TCPConn); ok {
|
|
||||||
if err := tc.SetKeepAlive(true); err != nil {
|
|
||||||
// Don't send COM_QUIT before handshake.
|
|
||||||
mc.netConn.Close()
|
|
||||||
mc.netConn = nil
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mc.buf = newBuffer(mc.netConn)
|
|
||||||
|
|
||||||
// Set I/O timeouts
|
|
||||||
mc.buf.timeout = mc.cfg.ReadTimeout
|
|
||||||
mc.writeTimeout = mc.cfg.WriteTimeout
|
|
||||||
|
|
||||||
// Reading Handshake Initialization Packet
|
|
||||||
cipher, err := mc.readInitPacket()
|
|
||||||
if err != nil {
|
|
||||||
mc.cleanup()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send Client Authentication Packet
|
|
||||||
if err = mc.writeAuthPacket(cipher); err != nil {
|
|
||||||
mc.cleanup()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle response to auth packet, switch methods if possible
|
|
||||||
if err = handleAuthResult(mc, cipher); err != nil {
|
|
||||||
// Authentication failed and MySQL has already closed the connection
|
|
||||||
// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
|
|
||||||
// Do not send COM_QUIT, just cleanup and return the error.
|
|
||||||
mc.cleanup()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if mc.cfg.MaxAllowedPacket > 0 {
|
|
||||||
mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
|
|
||||||
} else {
|
|
||||||
// Get max allowed packet size
|
|
||||||
maxap, err := mc.getSystemVar("max_allowed_packet")
|
|
||||||
if err != nil {
|
|
||||||
mc.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
mc.maxAllowedPacket = stringToInt(maxap) - 1
|
|
||||||
}
|
|
||||||
if mc.maxAllowedPacket < maxPacketSize {
|
|
||||||
mc.maxWriteSize = mc.maxAllowedPacket
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle DSN Params
|
|
||||||
err = mc.handleParams()
|
|
||||||
if err != nil {
|
|
||||||
mc.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return mc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleAuthResult(mc *mysqlConn, oldCipher []byte) error {
|
|
||||||
// Read Result Packet
|
|
||||||
cipher, err := mc.readResultOK()
|
|
||||||
if err == nil {
|
|
||||||
return nil // auth successful
|
|
||||||
}
|
|
||||||
|
|
||||||
if mc.cfg == nil {
|
|
||||||
return err // auth failed and retry not possible
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retry auth if configured to do so.
|
|
||||||
if mc.cfg.AllowOldPasswords && err == ErrOldPassword {
|
|
||||||
// Retry with old authentication method. Note: there are edge cases
|
|
||||||
// where this should work but doesn't; this is currently "wontfix":
|
|
||||||
// https://github.com/go-sql-driver/mysql/issues/184
|
|
||||||
|
|
||||||
// If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
|
|
||||||
// sent and we have to keep using the cipher sent in the init packet.
|
|
||||||
if cipher == nil {
|
|
||||||
cipher = oldCipher
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = mc.writeOldAuthPacket(cipher); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = mc.readResultOK()
|
|
||||||
} else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
|
|
||||||
// Retry with clear text password for
|
|
||||||
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
|
|
||||||
// http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
|
|
||||||
if err = mc.writeClearAuthPacket(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = mc.readResultOK()
|
|
||||||
} else if mc.cfg.AllowNativePasswords && err == ErrNativePassword {
|
|
||||||
if err = mc.writeNativeAuthPacket(cipher); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = mc.readResultOK()
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
sql.Register("mysql", &MySQLDriver{})
|
|
||||||
}
|
|
|
@ -1,548 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
|
|
||||||
errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
|
|
||||||
errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
|
|
||||||
errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config is a configuration parsed from a DSN string
|
|
||||||
type Config struct {
|
|
||||||
User string // Username
|
|
||||||
Passwd string // Password (requires User)
|
|
||||||
Net string // Network type
|
|
||||||
Addr string // Network address (requires Net)
|
|
||||||
DBName string // Database name
|
|
||||||
Params map[string]string // Connection parameters
|
|
||||||
Collation string // Connection collation
|
|
||||||
Loc *time.Location // Location for time.Time values
|
|
||||||
MaxAllowedPacket int // Max packet size allowed
|
|
||||||
TLSConfig string // TLS configuration name
|
|
||||||
tls *tls.Config // TLS configuration
|
|
||||||
Timeout time.Duration // Dial timeout
|
|
||||||
ReadTimeout time.Duration // I/O read timeout
|
|
||||||
WriteTimeout time.Duration // I/O write timeout
|
|
||||||
|
|
||||||
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
|
|
||||||
AllowCleartextPasswords bool // Allows the cleartext client side plugin
|
|
||||||
AllowNativePasswords bool // Allows the native password authentication method
|
|
||||||
AllowOldPasswords bool // Allows the old insecure password method
|
|
||||||
ClientFoundRows bool // Return number of matching rows instead of rows changed
|
|
||||||
ColumnsWithAlias bool // Prepend table alias to column names
|
|
||||||
InterpolateParams bool // Interpolate placeholders into query string
|
|
||||||
MultiStatements bool // Allow multiple statements in one query
|
|
||||||
ParseTime bool // Parse time values to time.Time
|
|
||||||
Strict bool // Return warnings as errors
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatDSN formats the given Config into a DSN string which can be passed to
|
|
||||||
// the driver.
|
|
||||||
func (cfg *Config) FormatDSN() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
|
|
||||||
// [username[:password]@]
|
|
||||||
if len(cfg.User) > 0 {
|
|
||||||
buf.WriteString(cfg.User)
|
|
||||||
if len(cfg.Passwd) > 0 {
|
|
||||||
buf.WriteByte(':')
|
|
||||||
buf.WriteString(cfg.Passwd)
|
|
||||||
}
|
|
||||||
buf.WriteByte('@')
|
|
||||||
}
|
|
||||||
|
|
||||||
// [protocol[(address)]]
|
|
||||||
if len(cfg.Net) > 0 {
|
|
||||||
buf.WriteString(cfg.Net)
|
|
||||||
if len(cfg.Addr) > 0 {
|
|
||||||
buf.WriteByte('(')
|
|
||||||
buf.WriteString(cfg.Addr)
|
|
||||||
buf.WriteByte(')')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// /dbname
|
|
||||||
buf.WriteByte('/')
|
|
||||||
buf.WriteString(cfg.DBName)
|
|
||||||
|
|
||||||
// [?param1=value1&...¶mN=valueN]
|
|
||||||
hasParam := false
|
|
||||||
|
|
||||||
if cfg.AllowAllFiles {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?allowAllFiles=true")
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.AllowCleartextPasswords {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&allowCleartextPasswords=true")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?allowCleartextPasswords=true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.AllowNativePasswords {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&allowNativePasswords=true")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?allowNativePasswords=true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.AllowOldPasswords {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&allowOldPasswords=true")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?allowOldPasswords=true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.ClientFoundRows {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&clientFoundRows=true")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?clientFoundRows=true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&collation=")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?collation=")
|
|
||||||
}
|
|
||||||
buf.WriteString(col)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.ColumnsWithAlias {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&columnsWithAlias=true")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?columnsWithAlias=true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.InterpolateParams {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&interpolateParams=true")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?interpolateParams=true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Loc != time.UTC && cfg.Loc != nil {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&loc=")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?loc=")
|
|
||||||
}
|
|
||||||
buf.WriteString(url.QueryEscape(cfg.Loc.String()))
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.MultiStatements {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&multiStatements=true")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?multiStatements=true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.ParseTime {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&parseTime=true")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?parseTime=true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.ReadTimeout > 0 {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&readTimeout=")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?readTimeout=")
|
|
||||||
}
|
|
||||||
buf.WriteString(cfg.ReadTimeout.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Strict {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&strict=true")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?strict=true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Timeout > 0 {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&timeout=")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?timeout=")
|
|
||||||
}
|
|
||||||
buf.WriteString(cfg.Timeout.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cfg.TLSConfig) > 0 {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&tls=")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?tls=")
|
|
||||||
}
|
|
||||||
buf.WriteString(url.QueryEscape(cfg.TLSConfig))
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.WriteTimeout > 0 {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&writeTimeout=")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?writeTimeout=")
|
|
||||||
}
|
|
||||||
buf.WriteString(cfg.WriteTimeout.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.MaxAllowedPacket > 0 {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteString("&maxAllowedPacket=")
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteString("?maxAllowedPacket=")
|
|
||||||
}
|
|
||||||
buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// other params
|
|
||||||
if cfg.Params != nil {
|
|
||||||
for param, value := range cfg.Params {
|
|
||||||
if hasParam {
|
|
||||||
buf.WriteByte('&')
|
|
||||||
} else {
|
|
||||||
hasParam = true
|
|
||||||
buf.WriteByte('?')
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString(param)
|
|
||||||
buf.WriteByte('=')
|
|
||||||
buf.WriteString(url.QueryEscape(value))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseDSN parses the DSN string to a Config
|
|
||||||
func ParseDSN(dsn string) (cfg *Config, err error) {
|
|
||||||
// New config with some default values
|
|
||||||
cfg = &Config{
|
|
||||||
Loc: time.UTC,
|
|
||||||
Collation: defaultCollation,
|
|
||||||
}
|
|
||||||
|
|
||||||
// [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
|
|
||||||
// Find the last '/' (since the password or the net addr might contain a '/')
|
|
||||||
foundSlash := false
|
|
||||||
for i := len(dsn) - 1; i >= 0; i-- {
|
|
||||||
if dsn[i] == '/' {
|
|
||||||
foundSlash = true
|
|
||||||
var j, k int
|
|
||||||
|
|
||||||
// left part is empty if i <= 0
|
|
||||||
if i > 0 {
|
|
||||||
// [username[:password]@][protocol[(address)]]
|
|
||||||
// Find the last '@' in dsn[:i]
|
|
||||||
for j = i; j >= 0; j-- {
|
|
||||||
if dsn[j] == '@' {
|
|
||||||
// username[:password]
|
|
||||||
// Find the first ':' in dsn[:j]
|
|
||||||
for k = 0; k < j; k++ {
|
|
||||||
if dsn[k] == ':' {
|
|
||||||
cfg.Passwd = dsn[k+1 : j]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cfg.User = dsn[:k]
|
|
||||||
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// [protocol[(address)]]
|
|
||||||
// Find the first '(' in dsn[j+1:i]
|
|
||||||
for k = j + 1; k < i; k++ {
|
|
||||||
if dsn[k] == '(' {
|
|
||||||
// dsn[i-1] must be == ')' if an address is specified
|
|
||||||
if dsn[i-1] != ')' {
|
|
||||||
if strings.ContainsRune(dsn[k+1:i], ')') {
|
|
||||||
return nil, errInvalidDSNUnescaped
|
|
||||||
}
|
|
||||||
return nil, errInvalidDSNAddr
|
|
||||||
}
|
|
||||||
cfg.Addr = dsn[k+1 : i-1]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cfg.Net = dsn[j+1 : k]
|
|
||||||
}
|
|
||||||
|
|
||||||
// dbname[?param1=value1&...¶mN=valueN]
|
|
||||||
// Find the first '?' in dsn[i+1:]
|
|
||||||
for j = i + 1; j < len(dsn); j++ {
|
|
||||||
if dsn[j] == '?' {
|
|
||||||
if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cfg.DBName = dsn[i+1 : j]
|
|
||||||
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !foundSlash && len(dsn) > 0 {
|
|
||||||
return nil, errInvalidDSNNoSlash
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
|
|
||||||
return nil, errInvalidDSNUnsafeCollation
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set default network if empty
|
|
||||||
if cfg.Net == "" {
|
|
||||||
cfg.Net = "tcp"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set default address if empty
|
|
||||||
if cfg.Addr == "" {
|
|
||||||
switch cfg.Net {
|
|
||||||
case "tcp":
|
|
||||||
cfg.Addr = "127.0.0.1:3306"
|
|
||||||
case "unix":
|
|
||||||
cfg.Addr = "/tmp/mysql.sock"
|
|
||||||
default:
|
|
||||||
return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseDSNParams parses the DSN "query string"
|
|
||||||
// Values must be url.QueryEscape'ed
|
|
||||||
func parseDSNParams(cfg *Config, params string) (err error) {
|
|
||||||
for _, v := range strings.Split(params, "&") {
|
|
||||||
param := strings.SplitN(v, "=", 2)
|
|
||||||
if len(param) != 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// cfg params
|
|
||||||
switch value := param[1]; param[0] {
|
|
||||||
|
|
||||||
// Disable INFILE whitelist / enable all files
|
|
||||||
case "allowAllFiles":
|
|
||||||
var isBool bool
|
|
||||||
cfg.AllowAllFiles, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use cleartext authentication mode (MySQL 5.5.10+)
|
|
||||||
case "allowCleartextPasswords":
|
|
||||||
var isBool bool
|
|
||||||
cfg.AllowCleartextPasswords, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use native password authentication
|
|
||||||
case "allowNativePasswords":
|
|
||||||
var isBool bool
|
|
||||||
cfg.AllowNativePasswords, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use old authentication mode (pre MySQL 4.1)
|
|
||||||
case "allowOldPasswords":
|
|
||||||
var isBool bool
|
|
||||||
cfg.AllowOldPasswords, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Switch "rowsAffected" mode
|
|
||||||
case "clientFoundRows":
|
|
||||||
var isBool bool
|
|
||||||
cfg.ClientFoundRows, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collation
|
|
||||||
case "collation":
|
|
||||||
cfg.Collation = value
|
|
||||||
break
|
|
||||||
|
|
||||||
case "columnsWithAlias":
|
|
||||||
var isBool bool
|
|
||||||
cfg.ColumnsWithAlias, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compression
|
|
||||||
case "compress":
|
|
||||||
return errors.New("compression not implemented yet")
|
|
||||||
|
|
||||||
// Enable client side placeholder substitution
|
|
||||||
case "interpolateParams":
|
|
||||||
var isBool bool
|
|
||||||
cfg.InterpolateParams, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Time Location
|
|
||||||
case "loc":
|
|
||||||
if value, err = url.QueryUnescape(value); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cfg.Loc, err = time.LoadLocation(value)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// multiple statements in one query
|
|
||||||
case "multiStatements":
|
|
||||||
var isBool bool
|
|
||||||
cfg.MultiStatements, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// time.Time parsing
|
|
||||||
case "parseTime":
|
|
||||||
var isBool bool
|
|
||||||
cfg.ParseTime, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// I/O read Timeout
|
|
||||||
case "readTimeout":
|
|
||||||
cfg.ReadTimeout, err = time.ParseDuration(value)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Strict mode
|
|
||||||
case "strict":
|
|
||||||
var isBool bool
|
|
||||||
cfg.Strict, isBool = readBool(value)
|
|
||||||
if !isBool {
|
|
||||||
return errors.New("invalid bool value: " + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dial Timeout
|
|
||||||
case "timeout":
|
|
||||||
cfg.Timeout, err = time.ParseDuration(value)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// TLS-Encryption
|
|
||||||
case "tls":
|
|
||||||
boolValue, isBool := readBool(value)
|
|
||||||
if isBool {
|
|
||||||
if boolValue {
|
|
||||||
cfg.TLSConfig = "true"
|
|
||||||
cfg.tls = &tls.Config{}
|
|
||||||
} else {
|
|
||||||
cfg.TLSConfig = "false"
|
|
||||||
}
|
|
||||||
} else if vl := strings.ToLower(value); vl == "skip-verify" {
|
|
||||||
cfg.TLSConfig = vl
|
|
||||||
cfg.tls = &tls.Config{InsecureSkipVerify: true}
|
|
||||||
} else {
|
|
||||||
name, err := url.QueryUnescape(value)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid value for TLS config name: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tlsConfig, ok := tlsConfigRegister[name]; ok {
|
|
||||||
if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
|
|
||||||
host, _, err := net.SplitHostPort(cfg.Addr)
|
|
||||||
if err == nil {
|
|
||||||
tlsConfig.ServerName = host
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg.TLSConfig = name
|
|
||||||
cfg.tls = tlsConfig
|
|
||||||
} else {
|
|
||||||
return errors.New("invalid value / unknown config name: " + name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// I/O write Timeout
|
|
||||||
case "writeTimeout":
|
|
||||||
cfg.WriteTimeout, err = time.ParseDuration(value)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case "maxAllowedPacket":
|
|
||||||
cfg.MaxAllowedPacket, err = strconv.Atoi(value)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// lazy init
|
|
||||||
if cfg.Params == nil {
|
|
||||||
cfg.Params = make(map[string]string)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
|
@ -1,132 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql/driver"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Various errors the driver might return. Can change between driver versions.
|
|
||||||
var (
|
|
||||||
ErrInvalidConn = errors.New("invalid connection")
|
|
||||||
ErrMalformPkt = errors.New("malformed packet")
|
|
||||||
ErrNoTLS = errors.New("TLS requested but server does not support TLS")
|
|
||||||
ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
|
|
||||||
ErrNativePassword = errors.New("this user requires mysql native password authentication.")
|
|
||||||
ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
|
|
||||||
ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
|
|
||||||
ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
|
|
||||||
ErrPktSync = errors.New("commands out of sync. You can't run this command now")
|
|
||||||
ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
|
|
||||||
ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
|
|
||||||
ErrBusyBuffer = errors.New("busy buffer")
|
|
||||||
)
|
|
||||||
|
|
||||||
var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
|
|
||||||
|
|
||||||
// Logger is used to log critical error messages.
|
|
||||||
type Logger interface {
|
|
||||||
Print(v ...interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLogger is used to set the logger for critical errors.
|
|
||||||
// The initial logger is os.Stderr.
|
|
||||||
func SetLogger(logger Logger) error {
|
|
||||||
if logger == nil {
|
|
||||||
return errors.New("logger is nil")
|
|
||||||
}
|
|
||||||
errLog = logger
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MySQLError is an error type which represents a single MySQL error
|
|
||||||
type MySQLError struct {
|
|
||||||
Number uint16
|
|
||||||
Message string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (me *MySQLError) Error() string {
|
|
||||||
return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MySQLWarnings is an error type which represents a group of one or more MySQL
|
|
||||||
// warnings
|
|
||||||
type MySQLWarnings []MySQLWarning
|
|
||||||
|
|
||||||
func (mws MySQLWarnings) Error() string {
|
|
||||||
var msg string
|
|
||||||
for i, warning := range mws {
|
|
||||||
if i > 0 {
|
|
||||||
msg += "\r\n"
|
|
||||||
}
|
|
||||||
msg += fmt.Sprintf(
|
|
||||||
"%s %s: %s",
|
|
||||||
warning.Level,
|
|
||||||
warning.Code,
|
|
||||||
warning.Message,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
|
|
||||||
// MySQLWarning is an error type which represents a single MySQL warning.
|
|
||||||
// Warnings are returned in groups only. See MySQLWarnings
|
|
||||||
type MySQLWarning struct {
|
|
||||||
Level string
|
|
||||||
Code string
|
|
||||||
Message string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mc *mysqlConn) getWarnings() (err error) {
|
|
||||||
rows, err := mc.Query("SHOW WARNINGS", nil)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var warnings = MySQLWarnings{}
|
|
||||||
var values = make([]driver.Value, 3)
|
|
||||||
|
|
||||||
for {
|
|
||||||
err = rows.Next(values)
|
|
||||||
switch err {
|
|
||||||
case nil:
|
|
||||||
warning := MySQLWarning{}
|
|
||||||
|
|
||||||
if raw, ok := values[0].([]byte); ok {
|
|
||||||
warning.Level = string(raw)
|
|
||||||
} else {
|
|
||||||
warning.Level = fmt.Sprintf("%s", values[0])
|
|
||||||
}
|
|
||||||
if raw, ok := values[1].([]byte); ok {
|
|
||||||
warning.Code = string(raw)
|
|
||||||
} else {
|
|
||||||
warning.Code = fmt.Sprintf("%s", values[1])
|
|
||||||
}
|
|
||||||
if raw, ok := values[2].([]byte); ok {
|
|
||||||
warning.Message = string(raw)
|
|
||||||
} else {
|
|
||||||
warning.Message = fmt.Sprintf("%s", values[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
warnings = append(warnings, warning)
|
|
||||||
|
|
||||||
case io.EOF:
|
|
||||||
return warnings
|
|
||||||
|
|
||||||
default:
|
|
||||||
rows.Close()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,182 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
fileRegister map[string]bool
|
|
||||||
fileRegisterLock sync.RWMutex
|
|
||||||
readerRegister map[string]func() io.Reader
|
|
||||||
readerRegisterLock sync.RWMutex
|
|
||||||
)
|
|
||||||
|
|
||||||
// RegisterLocalFile adds the given file to the file whitelist,
|
|
||||||
// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
|
|
||||||
// Alternatively you can allow the use of all local files with
|
|
||||||
// the DSN parameter 'allowAllFiles=true'
|
|
||||||
//
|
|
||||||
// filePath := "/home/gopher/data.csv"
|
|
||||||
// mysql.RegisterLocalFile(filePath)
|
|
||||||
// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
|
|
||||||
// if err != nil {
|
|
||||||
// ...
|
|
||||||
//
|
|
||||||
func RegisterLocalFile(filePath string) {
|
|
||||||
fileRegisterLock.Lock()
|
|
||||||
// lazy map init
|
|
||||||
if fileRegister == nil {
|
|
||||||
fileRegister = make(map[string]bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
fileRegister[strings.Trim(filePath, `"`)] = true
|
|
||||||
fileRegisterLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeregisterLocalFile removes the given filepath from the whitelist.
|
|
||||||
func DeregisterLocalFile(filePath string) {
|
|
||||||
fileRegisterLock.Lock()
|
|
||||||
delete(fileRegister, strings.Trim(filePath, `"`))
|
|
||||||
fileRegisterLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterReaderHandler registers a handler function which is used
|
|
||||||
// to receive an io.Reader.
|
|
||||||
// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
|
|
||||||
// If the handler returns an io.ReadCloser, Close() is called when the
|
|
||||||
// request is finished.
|
|
||||||
//
|
|
||||||
// mysql.RegisterReaderHandler("data", func() io.Reader {
|
|
||||||
// var csvReader io.Reader // Some Reader that returns CSV data
|
|
||||||
// ... // Open Reader here
|
|
||||||
// return csvReader
|
|
||||||
// })
|
|
||||||
// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
|
|
||||||
// if err != nil {
|
|
||||||
// ...
|
|
||||||
//
|
|
||||||
func RegisterReaderHandler(name string, handler func() io.Reader) {
|
|
||||||
readerRegisterLock.Lock()
|
|
||||||
// lazy map init
|
|
||||||
if readerRegister == nil {
|
|
||||||
readerRegister = make(map[string]func() io.Reader)
|
|
||||||
}
|
|
||||||
|
|
||||||
readerRegister[name] = handler
|
|
||||||
readerRegisterLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeregisterReaderHandler removes the ReaderHandler function with
|
|
||||||
// the given name from the registry.
|
|
||||||
func DeregisterReaderHandler(name string) {
|
|
||||||
readerRegisterLock.Lock()
|
|
||||||
delete(readerRegister, name)
|
|
||||||
readerRegisterLock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func deferredClose(err *error, closer io.Closer) {
|
|
||||||
closeErr := closer.Close()
|
|
||||||
if *err == nil {
|
|
||||||
*err = closeErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
|
|
||||||
var rdr io.Reader
|
|
||||||
var data []byte
|
|
||||||
packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
|
|
||||||
if mc.maxWriteSize < packetSize {
|
|
||||||
packetSize = mc.maxWriteSize
|
|
||||||
}
|
|
||||||
|
|
||||||
if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
|
|
||||||
// The server might return an absolute path. See issue #355.
|
|
||||||
name = name[idx+8:]
|
|
||||||
|
|
||||||
readerRegisterLock.RLock()
|
|
||||||
handler, inMap := readerRegister[name]
|
|
||||||
readerRegisterLock.RUnlock()
|
|
||||||
|
|
||||||
if inMap {
|
|
||||||
rdr = handler()
|
|
||||||
if rdr != nil {
|
|
||||||
if cl, ok := rdr.(io.Closer); ok {
|
|
||||||
defer deferredClose(&err, cl)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("Reader '%s' is <nil>", name)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("Reader '%s' is not registered", name)
|
|
||||||
}
|
|
||||||
} else { // File
|
|
||||||
name = strings.Trim(name, `"`)
|
|
||||||
fileRegisterLock.RLock()
|
|
||||||
fr := fileRegister[name]
|
|
||||||
fileRegisterLock.RUnlock()
|
|
||||||
if mc.cfg.AllowAllFiles || fr {
|
|
||||||
var file *os.File
|
|
||||||
var fi os.FileInfo
|
|
||||||
|
|
||||||
if file, err = os.Open(name); err == nil {
|
|
||||||
defer deferredClose(&err, file)
|
|
||||||
|
|
||||||
// get file size
|
|
||||||
if fi, err = file.Stat(); err == nil {
|
|
||||||
rdr = file
|
|
||||||
if fileSize := int(fi.Size()); fileSize < packetSize {
|
|
||||||
packetSize = fileSize
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("local file '%s' is not registered", name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// send content packets
|
|
||||||
if err == nil {
|
|
||||||
data := make([]byte, 4+packetSize)
|
|
||||||
var n int
|
|
||||||
for err == nil {
|
|
||||||
n, err = rdr.Read(data[4:])
|
|
||||||
if n > 0 {
|
|
||||||
if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
|
|
||||||
return ioErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err == io.EOF {
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// send empty packet (termination)
|
|
||||||
if data == nil {
|
|
||||||
data = make([]byte, 4)
|
|
||||||
}
|
|
||||||
if ioErr := mc.writePacket(data[:4]); ioErr != nil {
|
|
||||||
return ioErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// read OK packet
|
|
||||||
if err == nil {
|
|
||||||
_, err = mc.readResultOK()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
mc.readPacket()
|
|
||||||
return err
|
|
||||||
}
|
|
File diff suppressed because it is too large
@ -1,22 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package mysql

type mysqlResult struct {
affectedRows int64
insertId int64
}

func (res *mysqlResult) LastInsertId() (int64, error) {
return res.insertId, nil
}

func (res *mysqlResult) RowsAffected() (int64, error) {
return res.affectedRows, nil
}
@ -1,112 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql/driver"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
type mysqlField struct {
|
|
||||||
tableName string
|
|
||||||
name string
|
|
||||||
flags fieldFlag
|
|
||||||
fieldType byte
|
|
||||||
decimals byte
|
|
||||||
}
|
|
||||||
|
|
||||||
type mysqlRows struct {
|
|
||||||
mc *mysqlConn
|
|
||||||
columns []mysqlField
|
|
||||||
}
|
|
||||||
|
|
||||||
type binaryRows struct {
|
|
||||||
mysqlRows
|
|
||||||
}
|
|
||||||
|
|
||||||
type textRows struct {
|
|
||||||
mysqlRows
|
|
||||||
}
|
|
||||||
|
|
||||||
type emptyRows struct{}
|
|
||||||
|
|
||||||
func (rows *mysqlRows) Columns() []string {
|
|
||||||
columns := make([]string, len(rows.columns))
|
|
||||||
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
|
|
||||||
for i := range columns {
|
|
||||||
if tableName := rows.columns[i].tableName; len(tableName) > 0 {
|
|
||||||
columns[i] = tableName + "." + rows.columns[i].name
|
|
||||||
} else {
|
|
||||||
columns[i] = rows.columns[i].name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for i := range columns {
|
|
||||||
columns[i] = rows.columns[i].name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return columns
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rows *mysqlRows) Close() error {
|
|
||||||
mc := rows.mc
|
|
||||||
if mc == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if mc.netConn == nil {
|
|
||||||
return ErrInvalidConn
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove unread packets from stream
|
|
||||||
err := mc.readUntilEOF()
|
|
||||||
if err == nil {
|
|
||||||
if err = mc.discardResults(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rows.mc = nil
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rows *binaryRows) Next(dest []driver.Value) error {
|
|
||||||
if mc := rows.mc; mc != nil {
|
|
||||||
if mc.netConn == nil {
|
|
||||||
return ErrInvalidConn
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch next row from stream
|
|
||||||
return rows.readRow(dest)
|
|
||||||
}
|
|
||||||
return io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rows *textRows) Next(dest []driver.Value) error {
|
|
||||||
if mc := rows.mc; mc != nil {
|
|
||||||
if mc.netConn == nil {
|
|
||||||
return ErrInvalidConn
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch next row from stream
|
|
||||||
return rows.readRow(dest)
|
|
||||||
}
|
|
||||||
return io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rows emptyRows) Columns() []string {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rows emptyRows) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rows emptyRows) Next(dest []driver.Value) error {
|
|
||||||
return io.EOF
|
|
||||||
}

@@ -1,153 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package mysql

import (
	"database/sql/driver"
	"fmt"
	"reflect"
	"strconv"
)

type mysqlStmt struct {
	mc         *mysqlConn
	id         uint32
	paramCount int
	columns    []mysqlField // cached from the first query
}

func (stmt *mysqlStmt) Close() error {
	if stmt.mc == nil || stmt.mc.netConn == nil {
		// driver.Stmt.Close can be called more than once, thus this function
		// has to be idempotent.
		// See also Issue #450 and golang/go#16019.
		//errLog.Print(ErrInvalidConn)
		return driver.ErrBadConn
	}

	err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
	stmt.mc = nil
	return err
}

func (stmt *mysqlStmt) NumInput() int {
	return stmt.paramCount
}

func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
	return converter{}
}

func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
	if stmt.mc.netConn == nil {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}
	// Send command
	err := stmt.writeExecutePacket(args)
	if err != nil {
		return nil, err
	}

	mc := stmt.mc

	mc.affectedRows = 0
	mc.insertId = 0

	// Read Result
	resLen, err := mc.readResultSetHeaderPacket()
	if err == nil {
		if resLen > 0 {
			// Columns
			err = mc.readUntilEOF()
			if err != nil {
				return nil, err
			}

			// Rows
			err = mc.readUntilEOF()
		}
		if err == nil {
			return &mysqlResult{
				affectedRows: int64(mc.affectedRows),
				insertId:     int64(mc.insertId),
			}, nil
		}
	}

	return nil, err
}

func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
	if stmt.mc.netConn == nil {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}
	// Send command
	err := stmt.writeExecutePacket(args)
	if err != nil {
		return nil, err
	}

	mc := stmt.mc

	// Read Result
	resLen, err := mc.readResultSetHeaderPacket()
	if err != nil {
		return nil, err
	}

	rows := new(binaryRows)

	if resLen > 0 {
		rows.mc = mc
		// Columns
		// If not cached, read them and cache them
		if stmt.columns == nil {
			rows.columns, err = mc.readColumns(resLen)
			stmt.columns = rows.columns
		} else {
			rows.columns = stmt.columns
			err = mc.readUntilEOF()
		}
	}

	return rows, err
}

type converter struct{}

func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
	if driver.IsValue(v) {
		return v, nil
	}

	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Ptr:
		// indirect pointers
		if rv.IsNil() {
			return nil, nil
		}
		return c.ConvertValue(rv.Elem().Interface())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return rv.Int(), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return int64(rv.Uint()), nil
	case reflect.Uint64:
		u64 := rv.Uint()
		if u64 >= 1<<63 {
			return strconv.FormatUint(u64, 10), nil
		}
		return int64(u64), nil
	case reflect.Float32, reflect.Float64:
		return rv.Float(), nil
	}
	return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
}

@@ -1,31 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package mysql

type mysqlTx struct {
	mc *mysqlConn
}

func (tx *mysqlTx) Commit() (err error) {
	if tx.mc == nil || tx.mc.netConn == nil {
		return ErrInvalidConn
	}
	err = tx.mc.exec("COMMIT")
	tx.mc = nil
	return
}

func (tx *mysqlTx) Rollback() (err error) {
	if tx.mc == nil || tx.mc.netConn == nil {
		return ErrInvalidConn
	}
	err = tx.mc.exec("ROLLBACK")
	tx.mc = nil
	return
}
|
|
|
@ -1,740 +0,0 @@
|
||||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
|
||||||
//
|
|
||||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
|
||||||
//
|
|
||||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
||||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
||||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
package mysql
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha1"
|
|
||||||
"crypto/tls"
|
|
||||||
"database/sql/driver"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
|
|
||||||
)
|
|
||||||
|
|
||||||
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
|
|
||||||
// Use the key as a value in the DSN where tls=value.
|
|
||||||
//
|
|
||||||
// rootCertPool := x509.NewCertPool()
|
|
||||||
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
|
|
||||||
// if err != nil {
|
|
||||||
// log.Fatal(err)
|
|
||||||
// }
|
|
||||||
// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
|
|
||||||
// log.Fatal("Failed to append PEM.")
|
|
||||||
// }
|
|
||||||
// clientCert := make([]tls.Certificate, 0, 1)
|
|
||||||
// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
|
|
||||||
// if err != nil {
|
|
||||||
// log.Fatal(err)
|
|
||||||
// }
|
|
||||||
// clientCert = append(clientCert, certs)
|
|
||||||
// mysql.RegisterTLSConfig("custom", &tls.Config{
|
|
||||||
// RootCAs: rootCertPool,
|
|
||||||
// Certificates: clientCert,
|
|
||||||
// })
|
|
||||||
// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
|
|
||||||
//
|
|
||||||
func RegisterTLSConfig(key string, config *tls.Config) error {
|
|
||||||
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
|
|
||||||
return fmt.Errorf("key '%s' is reserved", key)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tlsConfigRegister == nil {
|
|
||||||
tlsConfigRegister = make(map[string]*tls.Config)
|
|
||||||
}
|
|
||||||
|
|
||||||
tlsConfigRegister[key] = config
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeregisterTLSConfig removes the tls.Config associated with key.
|
|
||||||
func DeregisterTLSConfig(key string) {
|
|
||||||
if tlsConfigRegister != nil {
|
|
||||||
delete(tlsConfigRegister, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the bool value of the input.
|
|
||||||
// The 2nd return value indicates if the input was a valid bool value
|
|
||||||
func readBool(input string) (value bool, valid bool) {
|
|
||||||
switch input {
|
|
||||||
case "1", "true", "TRUE", "True":
|
|
||||||
return true, true
|
|
||||||
case "0", "false", "FALSE", "False":
|
|
||||||
return false, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Not a valid bool value
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/******************************************************************************
|
|
||||||
* Authentication *
|
|
||||||
******************************************************************************/
|
|
||||||
|
|
||||||
// Encrypt password using 4.1+ method
|
|
||||||
func scramblePassword(scramble, password []byte) []byte {
|
|
||||||
if len(password) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// stage1Hash = SHA1(password)
|
|
||||||
crypt := sha1.New()
|
|
||||||
crypt.Write(password)
|
|
||||||
stage1 := crypt.Sum(nil)
|
|
||||||
|
|
||||||
// scrambleHash = SHA1(scramble + SHA1(stage1Hash))
|
|
||||||
// inner Hash
|
|
||||||
crypt.Reset()
|
|
||||||
crypt.Write(stage1)
|
|
||||||
hash := crypt.Sum(nil)
|
|
||||||
|
|
||||||
// outer Hash
|
|
||||||
crypt.Reset()
|
|
||||||
crypt.Write(scramble)
|
|
||||||
crypt.Write(hash)
|
|
||||||
scramble = crypt.Sum(nil)
|
|
||||||
|
|
||||||
// token = scrambleHash XOR stage1Hash
|
|
||||||
for i := range scramble {
|
|
||||||
scramble[i] ^= stage1[i]
|
|
||||||
}
|
|
||||||
return scramble
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt password using pre 4.1 (old password) method
|
|
||||||
// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
|
|
||||||
type myRnd struct {
|
|
||||||
seed1, seed2 uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
const myRndMaxVal = 0x3FFFFFFF
|
|
||||||
|
|
||||||
// Pseudo random number generator
|
|
||||||
func newMyRnd(seed1, seed2 uint32) *myRnd {
|
|
||||||
return &myRnd{
|
|
||||||
seed1: seed1 % myRndMaxVal,
|
|
||||||
seed2: seed2 % myRndMaxVal,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tested to be equivalent to MariaDB's floating point variant
|
|
||||||
// http://play.golang.org/p/QHvhd4qved
|
|
||||||
// http://play.golang.org/p/RG0q4ElWDx
|
|
||||||
func (r *myRnd) NextByte() byte {
|
|
||||||
r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
|
|
||||||
r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
|
|
||||||
|
|
||||||
return byte(uint64(r.seed1) * 31 / myRndMaxVal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate binary hash from byte string using insecure pre 4.1 method
|
|
||||||
func pwHash(password []byte) (result [2]uint32) {
|
|
||||||
var add uint32 = 7
|
|
||||||
var tmp uint32
|
|
||||||
|
|
||||||
result[0] = 1345345333
|
|
||||||
result[1] = 0x12345671
|
|
||||||
|
|
||||||
for _, c := range password {
|
|
||||||
// skip spaces and tabs in password
|
|
||||||
if c == ' ' || c == '\t' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
tmp = uint32(c)
|
|
||||||
result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
|
|
||||||
result[1] += (result[1] << 8) ^ result[0]
|
|
||||||
add += tmp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove sign bit (1<<31)-1)
|
|
||||||
result[0] &= 0x7FFFFFFF
|
|
||||||
result[1] &= 0x7FFFFFFF
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt password using insecure pre 4.1 method
|
|
||||||
func scrambleOldPassword(scramble, password []byte) []byte {
|
|
||||||
if len(password) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
scramble = scramble[:8]
|
|
||||||
|
|
||||||
hashPw := pwHash(password)
|
|
||||||
hashSc := pwHash(scramble)
|
|
||||||
|
|
||||||
r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
|
|
||||||
|
|
||||||
var out [8]byte
|
|
||||||
for i := range out {
|
|
||||||
out[i] = r.NextByte() + 64
|
|
||||||
}
|
|
||||||
|
|
||||||
mask := r.NextByte()
|
|
||||||
for i := range out {
|
|
||||||
out[i] ^= mask
|
|
||||||
}
|
|
||||||
|
|
||||||
return out[:]
|
|
||||||
}
|
|
||||||
|
|
||||||
/******************************************************************************
|
|
||||||
* Time related utils *
|
|
||||||
******************************************************************************/
|
|
||||||
|
|
||||||
// NullTime represents a time.Time that may be NULL.
|
|
||||||
// NullTime implements the Scanner interface so
|
|
||||||
// it can be used as a scan destination:
|
|
||||||
//
|
|
||||||
// var nt NullTime
|
|
||||||
// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
|
|
||||||
// ...
|
|
||||||
// if nt.Valid {
|
|
||||||
// // use nt.Time
|
|
||||||
// } else {
|
|
||||||
// // NULL value
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// This NullTime implementation is not driver-specific
|
|
||||||
type NullTime struct {
|
|
||||||
Time time.Time
|
|
||||||
Valid bool // Valid is true if Time is not NULL
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scan implements the Scanner interface.
|
|
||||||
// The value type must be time.Time or string / []byte (formatted time-string),
|
|
||||||
// otherwise Scan fails.
|
|
||||||
func (nt *NullTime) Scan(value interface{}) (err error) {
|
|
||||||
if value == nil {
|
|
||||||
nt.Time, nt.Valid = time.Time{}, false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v := value.(type) {
|
|
||||||
case time.Time:
|
|
||||||
nt.Time, nt.Valid = v, true
|
|
||||||
return
|
|
||||||
case []byte:
|
|
||||||
nt.Time, err = parseDateTime(string(v), time.UTC)
|
|
||||||
nt.Valid = (err == nil)
|
|
||||||
return
|
|
||||||
case string:
|
|
||||||
nt.Time, err = parseDateTime(v, time.UTC)
|
|
||||||
nt.Valid = (err == nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
nt.Valid = false
|
|
||||||
return fmt.Errorf("Can't convert %T to time.Time", value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value implements the driver Valuer interface.
|
|
||||||
func (nt NullTime) Value() (driver.Value, error) {
|
|
||||||
if !nt.Valid {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return nt.Time, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
|
|
||||||
base := "0000-00-00 00:00:00.0000000"
|
|
||||||
switch len(str) {
|
|
||||||
case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
|
|
||||||
if str == base[:len(str)] {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t, err = time.Parse(timeFormat[:len(str)], str)
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("invalid time string: %s", str)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Adjust location
|
|
||||||
if err == nil && loc != time.UTC {
|
|
||||||
y, mo, d := t.Date()
|
|
||||||
h, mi, s := t.Clock()
|
|
||||||
t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
|
|
||||||
switch num {
|
|
||||||
case 0:
|
|
||||||
return time.Time{}, nil
|
|
||||||
case 4:
|
|
||||||
return time.Date(
|
|
||||||
int(binary.LittleEndian.Uint16(data[:2])), // year
|
|
||||||
time.Month(data[2]), // month
|
|
||||||
int(data[3]), // day
|
|
||||||
0, 0, 0, 0,
|
|
||||||
loc,
|
|
||||||
), nil
|
|
||||||
case 7:
|
|
||||||
return time.Date(
|
|
||||||
int(binary.LittleEndian.Uint16(data[:2])), // year
|
|
||||||
time.Month(data[2]), // month
|
|
||||||
int(data[3]), // day
|
|
||||||
int(data[4]), // hour
|
|
||||||
int(data[5]), // minutes
|
|
||||||
int(data[6]), // seconds
|
|
||||||
0,
|
|
||||||
loc,
|
|
||||||
), nil
|
|
||||||
case 11:
|
|
||||||
return time.Date(
|
|
||||||
int(binary.LittleEndian.Uint16(data[:2])), // year
|
|
||||||
time.Month(data[2]), // month
|
|
||||||
int(data[3]), // day
|
|
||||||
int(data[4]), // hour
|
|
||||||
int(data[5]), // minutes
|
|
||||||
int(data[6]), // seconds
|
|
||||||
int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
|
|
||||||
loc,
|
|
||||||
), nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
|
|
||||||
}
|
|
||||||
|
|
||||||
// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
|
|
||||||
// if the DATE or DATETIME has the zero value.
|
|
||||||
// It must never be changed.
|
|
||||||
// The current behavior depends on database/sql copying the result.
|
|
||||||
var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
|
|
||||||
|
|
||||||
const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
|
|
||||||
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
|
|
||||||
|
|
||||||
func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
|
|
||||||
// length expects the deterministic length of the zero value,
|
|
||||||
// negative time and 100+ hours are automatically added if needed
|
|
||||||
if len(src) == 0 {
|
|
||||||
if justTime {
|
|
||||||
return zeroDateTime[11 : 11+length], nil
|
|
||||||
}
|
|
||||||
return zeroDateTime[:length], nil
|
|
||||||
}
|
|
||||||
var dst []byte // return value
|
|
||||||
var pt, p1, p2, p3 byte // current digit pair
|
|
||||||
var zOffs byte // offset of value in zeroDateTime
|
|
||||||
if justTime {
|
|
||||||
switch length {
|
|
||||||
case
|
|
||||||
8, // time (can be up to 10 when negative and 100+ hours)
|
|
||||||
10, 11, 12, 13, 14, 15: // time with fractional seconds
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("illegal TIME length %d", length)
|
|
||||||
}
|
|
||||||
switch len(src) {
|
|
||||||
case 8, 12:
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
|
|
||||||
}
|
|
||||||
// +2 to enable negative time and 100+ hours
|
|
||||||
dst = make([]byte, 0, length+2)
|
|
||||||
if src[0] == 1 {
|
|
||||||
dst = append(dst, '-')
|
|
||||||
}
|
|
||||||
if src[1] != 0 {
|
|
||||||
hour := uint16(src[1])*24 + uint16(src[5])
|
|
||||||
pt = byte(hour / 100)
|
|
||||||
p1 = byte(hour - 100*uint16(pt))
|
|
||||||
dst = append(dst, digits01[pt])
|
|
||||||
} else {
|
|
||||||
p1 = src[5]
|
|
||||||
}
|
|
||||||
zOffs = 11
|
|
||||||
src = src[6:]
|
|
||||||
} else {
|
|
||||||
switch length {
|
|
||||||
case 10, 19, 21, 22, 23, 24, 25, 26:
|
|
||||||
default:
|
|
||||||
t := "DATE"
|
|
||||||
if length > 10 {
|
|
||||||
t += "TIME"
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("illegal %s length %d", t, length)
|
|
||||||
}
|
|
||||||
switch len(src) {
|
|
||||||
case 4, 7, 11:
|
|
||||||
default:
|
|
||||||
t := "DATE"
|
|
||||||
if length > 10 {
|
|
||||||
t += "TIME"
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
|
|
||||||
}
|
|
||||||
dst = make([]byte, 0, length)
|
|
||||||
// start with the date
|
|
||||||
year := binary.LittleEndian.Uint16(src[:2])
|
|
||||||
pt = byte(year / 100)
|
|
||||||
p1 = byte(year - 100*uint16(pt))
|
|
||||||
p2, p3 = src[2], src[3]
|
|
||||||
dst = append(dst,
|
|
||||||
digits10[pt], digits01[pt],
|
|
||||||
digits10[p1], digits01[p1], '-',
|
|
||||||
digits10[p2], digits01[p2], '-',
|
|
||||||
digits10[p3], digits01[p3],
|
|
||||||
)
|
|
||||||
if length == 10 {
|
|
||||||
return dst, nil
|
|
||||||
}
|
|
||||||
if len(src) == 4 {
|
|
||||||
return append(dst, zeroDateTime[10:length]...), nil
|
|
||||||
}
|
|
||||||
dst = append(dst, ' ')
|
|
||||||
p1 = src[4] // hour
|
|
||||||
src = src[5:]
|
|
||||||
}
|
|
||||||
// p1 is 2-digit hour, src is after hour
|
|
||||||
p2, p3 = src[0], src[1]
|
|
||||||
dst = append(dst,
|
|
||||||
digits10[p1], digits01[p1], ':',
|
|
||||||
digits10[p2], digits01[p2], ':',
|
|
||||||
digits10[p3], digits01[p3],
|
|
||||||
)
|
|
||||||
if length <= byte(len(dst)) {
|
|
||||||
return dst, nil
|
|
||||||
}
|
|
||||||
src = src[2:]
|
|
||||||
if len(src) == 0 {
|
|
||||||
return append(dst, zeroDateTime[19:zOffs+length]...), nil
|
|
||||||
}
|
|
||||||
microsecs := binary.LittleEndian.Uint32(src[:4])
|
|
||||||
p1 = byte(microsecs / 10000)
|
|
||||||
microsecs -= 10000 * uint32(p1)
|
|
||||||
p2 = byte(microsecs / 100)
|
|
||||||
microsecs -= 100 * uint32(p2)
|
|
||||||
p3 = byte(microsecs)
|
|
||||||
switch decimals := zOffs + length - 20; decimals {
|
|
||||||
default:
|
|
||||||
return append(dst, '.',
|
|
||||||
digits10[p1], digits01[p1],
|
|
||||||
digits10[p2], digits01[p2],
|
|
||||||
digits10[p3], digits01[p3],
|
|
||||||
), nil
|
|
||||||
case 1:
|
|
||||||
return append(dst, '.',
|
|
||||||
digits10[p1],
|
|
||||||
), nil
|
|
||||||
case 2:
|
|
||||||
return append(dst, '.',
|
|
||||||
digits10[p1], digits01[p1],
|
|
||||||
), nil
|
|
||||||
case 3:
|
|
||||||
return append(dst, '.',
|
|
||||||
digits10[p1], digits01[p1],
|
|
||||||
digits10[p2],
|
|
||||||
), nil
|
|
||||||
case 4:
|
|
||||||
return append(dst, '.',
|
|
||||||
digits10[p1], digits01[p1],
|
|
||||||
digits10[p2], digits01[p2],
|
|
||||||
), nil
|
|
||||||
case 5:
|
|
||||||
return append(dst, '.',
|
|
||||||
digits10[p1], digits01[p1],
|
|
||||||
digits10[p2], digits01[p2],
|
|
||||||
digits10[p3],
|
|
||||||
), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/******************************************************************************
|
|
||||||
* Convert from and to bytes *
|
|
||||||
******************************************************************************/
|
|
||||||
|
|
||||||
func uint64ToBytes(n uint64) []byte {
|
|
||||||
return []byte{
|
|
||||||
byte(n),
|
|
||||||
byte(n >> 8),
|
|
||||||
byte(n >> 16),
|
|
||||||
byte(n >> 24),
|
|
||||||
byte(n >> 32),
|
|
||||||
byte(n >> 40),
|
|
||||||
byte(n >> 48),
|
|
||||||
byte(n >> 56),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func uint64ToString(n uint64) []byte {
|
|
||||||
var a [20]byte
|
|
||||||
i := 20
|
|
||||||
|
|
||||||
// U+0030 = 0
|
|
||||||
// ...
|
|
||||||
// U+0039 = 9
|
|
||||||
|
|
||||||
var q uint64
|
|
||||||
for n >= 10 {
|
|
||||||
i--
|
|
||||||
q = n / 10
|
|
||||||
a[i] = uint8(n-q*10) + 0x30
|
|
||||||
n = q
|
|
||||||
}
|
|
||||||
|
|
||||||
i--
|
|
||||||
a[i] = uint8(n) + 0x30
|
|
||||||
|
|
||||||
return a[i:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// treats string value as unsigned integer representation
|
|
||||||
func stringToInt(b []byte) int {
|
|
||||||
val := 0
|
|
||||||
for i := range b {
|
|
||||||
val *= 10
|
|
||||||
val += int(b[i] - 0x30)
|
|
||||||
}
|
|
||||||
return val
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns the string read as a bytes slice, wheter the value is NULL,
|
|
||||||
// the number of bytes read and an error, in case the string is longer than
|
|
||||||
// the input slice
|
|
||||||
func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
|
|
||||||
// Get length
|
|
||||||
num, isNull, n := readLengthEncodedInteger(b)
|
|
||||||
if num < 1 {
|
|
||||||
return b[n:n], isNull, n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
n += int(num)
|
|
||||||
|
|
||||||
// Check data length
|
|
||||||
if len(b) >= n {
|
|
||||||
return b[n-int(num) : n], false, n, nil
|
|
||||||
}
|
|
||||||
return nil, false, n, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns the number of bytes skipped and an error, in case the string is
|
|
||||||
// longer than the input slice
|
|
||||||
func skipLengthEncodedString(b []byte) (int, error) {
|
|
||||||
// Get length
|
|
||||||
num, _, n := readLengthEncodedInteger(b)
|
|
||||||
if num < 1 {
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
n += int(num)
|
|
||||||
|
|
||||||
// Check data length
|
|
||||||
if len(b) >= n {
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
return n, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns the number read, whether the value is NULL and the number of bytes read
|
|
||||||
func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
|
|
||||||
// See issue #349
|
|
||||||
if len(b) == 0 {
|
|
||||||
return 0, true, 1
|
|
||||||
}
|
|
||||||
switch b[0] {
|
|
||||||
|
|
||||||
// 251: NULL
|
|
||||||
case 0xfb:
|
|
||||||
return 0, true, 1
|
|
||||||
|
|
||||||
// 252: value of following 2
|
|
||||||
case 0xfc:
|
|
||||||
return uint64(b[1]) | uint64(b[2])<<8, false, 3
|
|
||||||
|
|
||||||
// 253: value of following 3
|
|
||||||
case 0xfd:
|
|
||||||
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
|
|
||||||
|
|
||||||
// 254: value of following 8
|
|
||||||
case 0xfe:
|
|
||||||
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
|
|
||||||
uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
|
|
||||||
uint64(b[7])<<48 | uint64(b[8])<<56,
|
|
||||||
false, 9
|
|
||||||
}
|
|
||||||
|
|
||||||
// 0-250: value of first byte
|
|
||||||
return uint64(b[0]), false, 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// encodes a uint64 value and appends it to the given bytes slice
|
|
||||||
func appendLengthEncodedInteger(b []byte, n uint64) []byte {
|
|
||||||
switch {
|
|
||||||
case n <= 250:
|
|
||||||
return append(b, byte(n))
|
|
||||||
|
|
||||||
case n <= 0xffff:
|
|
||||||
return append(b, 0xfc, byte(n), byte(n>>8))
|
|
||||||
|
|
||||||
case n <= 0xffffff:
|
|
||||||
return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
|
|
||||||
}
|
|
||||||
return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
|
|
||||||
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
|
|
||||||
}
|
|
||||||
|
|
||||||
// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
|
|
||||||
// If cap(buf) is not enough, reallocate new buffer.
|
|
||||||
func reserveBuffer(buf []byte, appendSize int) []byte {
|
|
||||||
newSize := len(buf) + appendSize
|
|
||||||
if cap(buf) < newSize {
|
|
||||||
// Grow buffer exponentially
|
|
||||||
newBuf := make([]byte, len(buf)*2+appendSize)
|
|
||||||
copy(newBuf, buf)
|
|
||||||
buf = newBuf
|
|
||||||
}
|
|
||||||
return buf[:newSize]
|
|
||||||
}
|
|
||||||
|
|
||||||
// escapeBytesBackslash escapes []byte with backslashes (\)
|
|
||||||
// This escapes the contents of a string (provided as []byte) by adding backslashes before special
|
|
||||||
// characters, and turning others into specific escape sequences, such as
|
|
||||||
// turning newlines into \n and null bytes into \0.
|
|
||||||
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
|
|
||||||
func escapeBytesBackslash(buf, v []byte) []byte {
|
|
||||||
pos := len(buf)
|
|
||||||
buf = reserveBuffer(buf, len(v)*2)
|
|
||||||
|
|
||||||
for _, c := range v {
|
|
||||||
switch c {
|
|
||||||
case '\x00':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = '0'
|
|
||||||
pos += 2
|
|
||||||
case '\n':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = 'n'
|
|
||||||
pos += 2
|
|
||||||
case '\r':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = 'r'
|
|
||||||
pos += 2
|
|
||||||
case '\x1a':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = 'Z'
|
|
||||||
pos += 2
|
|
||||||
case '\'':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = '\''
|
|
||||||
pos += 2
|
|
||||||
case '"':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = '"'
|
|
||||||
pos += 2
|
|
||||||
case '\\':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = '\\'
|
|
||||||
pos += 2
|
|
||||||
default:
|
|
||||||
buf[pos] = c
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf[:pos]
|
|
||||||
}
|
|
||||||
|
|
||||||
// escapeStringBackslash is similar to escapeBytesBackslash but for string.
|
|
||||||
func escapeStringBackslash(buf []byte, v string) []byte {
|
|
||||||
pos := len(buf)
|
|
||||||
buf = reserveBuffer(buf, len(v)*2)
|
|
||||||
|
|
||||||
for i := 0; i < len(v); i++ {
|
|
||||||
c := v[i]
|
|
||||||
switch c {
|
|
||||||
case '\x00':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = '0'
|
|
||||||
pos += 2
|
|
||||||
case '\n':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = 'n'
|
|
||||||
pos += 2
|
|
||||||
case '\r':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = 'r'
|
|
||||||
pos += 2
|
|
||||||
case '\x1a':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = 'Z'
|
|
||||||
pos += 2
|
|
||||||
case '\'':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = '\''
|
|
||||||
pos += 2
|
|
||||||
case '"':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = '"'
|
|
||||||
pos += 2
|
|
||||||
case '\\':
|
|
||||||
buf[pos] = '\\'
|
|
||||||
buf[pos+1] = '\\'
|
|
||||||
pos += 2
|
|
||||||
default:
|
|
||||||
buf[pos] = c
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf[:pos]
|
|
||||||
}
|
|
||||||
|
|
||||||
// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
|
|
||||||
// This escapes the contents of a string by doubling up any apostrophes that
|
|
||||||
// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
|
|
||||||
// effect on the server.
|
|
||||||
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
|
|
||||||
func escapeBytesQuotes(buf, v []byte) []byte {
|
|
||||||
pos := len(buf)
|
|
||||||
buf = reserveBuffer(buf, len(v)*2)
|
|
||||||
|
|
||||||
for _, c := range v {
|
|
||||||
if c == '\'' {
|
|
||||||
buf[pos] = '\''
|
|
||||||
buf[pos+1] = '\''
|
|
||||||
pos += 2
|
|
||||||
} else {
|
|
||||||
buf[pos] = c
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf[:pos]
|
|
||||||
}
|
|
||||||
|
|
||||||
// escapeStringQuotes is similar to escapeBytesQuotes but for string.
|
|
||||||
func escapeStringQuotes(buf []byte, v string) []byte {
|
|
||||||
pos := len(buf)
|
|
||||||
buf = reserveBuffer(buf, len(v)*2)
|
|
||||||
|
|
||||||
for i := 0; i < len(v); i++ {
|
|
||||||
c := v[i]
|
|
||||||
if c == '\'' {
|
|
||||||
buf[pos] = '\''
|
|
||||||
buf[pos+1] = '\''
|
|
||||||
pos += 2
|
|
||||||
} else {
|
|
||||||
buf[pos] = c
|
|
||||||
pos++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf[:pos]
|
|
||||||
}
|
|
|
@@ -1,19 +0,0 @@
Copyright 2012 Keith Rarick

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@@ -1,9 +0,0 @@
package pretty

    import "github.com/kr/pretty"

Package pretty provides pretty-printing for Go values.

Documentation

    http://godoc.org/github.com/kr/pretty

@@ -1,148 +0,0 @@
package pretty

import (
	"fmt"
	"io"
	"reflect"
)

type sbuf []string

func (s *sbuf) Write(b []byte) (int, error) {
	*s = append(*s, string(b))
	return len(b), nil
}

// Diff returns a slice where each element describes
// a difference between a and b.
func Diff(a, b interface{}) (desc []string) {
	Fdiff((*sbuf)(&desc), a, b)
	return desc
}

// Fdiff writes to w a description of the differences between a and b.
func Fdiff(w io.Writer, a, b interface{}) {
	diffWriter{w: w}.diff(reflect.ValueOf(a), reflect.ValueOf(b))
}

type diffWriter struct {
	w io.Writer
	l string // label
}

func (w diffWriter) printf(f string, a ...interface{}) {
	var l string
	if w.l != "" {
		l = w.l + ": "
	}
	fmt.Fprintf(w.w, l+f, a...)
}

func (w diffWriter) diff(av, bv reflect.Value) {
	if !av.IsValid() && bv.IsValid() {
		w.printf("nil != %#v", bv.Interface())
		return
	}
	if av.IsValid() && !bv.IsValid() {
		w.printf("%#v != nil", av.Interface())
		return
	}
	if !av.IsValid() && !bv.IsValid() {
		return
	}

	at := av.Type()
	bt := bv.Type()
	if at != bt {
		w.printf("%v != %v", at, bt)
		return
	}

	// numeric types, including bool
	if at.Kind() < reflect.Array {
		a, b := av.Interface(), bv.Interface()
		if a != b {
			w.printf("%#v != %#v", a, b)
		}
		return
	}

	switch at.Kind() {
	case reflect.String:
		a, b := av.Interface(), bv.Interface()
		if a != b {
			w.printf("%q != %q", a, b)
		}
	case reflect.Ptr:
		switch {
		case av.IsNil() && !bv.IsNil():
			w.printf("nil != %v", bv.Interface())
		case !av.IsNil() && bv.IsNil():
			w.printf("%v != nil", av.Interface())
		case !av.IsNil() && !bv.IsNil():
			w.diff(av.Elem(), bv.Elem())
		}
	case reflect.Struct:
		for i := 0; i < av.NumField(); i++ {
			w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i))
		}
	case reflect.Map:
		ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys())
		for _, k := range ak {
			w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
			w.printf("%q != (missing)", av.MapIndex(k))
		}
		for _, k := range both {
			w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
			w.diff(av.MapIndex(k), bv.MapIndex(k))
		}
		for _, k := range bk {
			w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
			w.printf("(missing) != %q", bv.MapIndex(k))
		}
	case reflect.Interface:
		w.diff(reflect.ValueOf(av.Interface()), reflect.ValueOf(bv.Interface()))
	default:
		if !reflect.DeepEqual(av.Interface(), bv.Interface()) {
			w.printf("%# v != %# v", Formatter(av.Interface()), Formatter(bv.Interface()))
		}
	}
}

func (d diffWriter) relabel(name string) (d1 diffWriter) {
	d1 = d
	if d.l != "" && name[0] != '[' {
		d1.l += "."
	}
	d1.l += name
	return d1
}

func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) {
	for _, av := range a {
		inBoth := false
		for _, bv := range b {
			if reflect.DeepEqual(av.Interface(), bv.Interface()) {
				inBoth = true
				both = append(both, av)
				break
			}
		}
		if !inBoth {
			ak = append(ak, av)
		}
	}
	for _, bv := range b {
		inBoth := false
		for _, av := range a {
			if reflect.DeepEqual(av.Interface(), bv.Interface()) {
				inBoth = true
				break
			}
		}
		if !inBoth {
			bk = append(bk, bv)
		}
	}
	return
}
|
|
|
@ -1,300 +0,0 @@
|
||||||
package pretty
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/kr/text"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"text/tabwriter"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
limit = 50
|
|
||||||
)
|
|
||||||
|
|
||||||
type formatter struct {
|
|
||||||
x interface{}
|
|
||||||
force bool
|
|
||||||
quote bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Formatter makes a wrapper, f, that will format x as go source with line
|
|
||||||
// breaks and tabs. Object f responds to the "%v" formatting verb when both the
|
|
||||||
// "#" and " " (space) flags are set, for example:
|
|
||||||
//
|
|
||||||
// fmt.Sprintf("%# v", Formatter(x))
|
|
||||||
//
|
|
||||||
// If one of these two flags is not set, or any other verb is used, f will
|
|
||||||
// format x according to the usual rules of package fmt.
|
|
||||||
// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
|
|
||||||
func Formatter(x interface{}) (f fmt.Formatter) {
|
|
||||||
return formatter{x: x, quote: true}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fo formatter) String() string {
|
|
||||||
return fmt.Sprint(fo.x) // unwrap it
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fo formatter) passThrough(f fmt.State, c rune) {
|
|
||||||
s := "%"
|
|
||||||
for i := 0; i < 128; i++ {
|
|
||||||
if f.Flag(i) {
|
|
||||||
s += string(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if w, ok := f.Width(); ok {
|
|
||||||
s += fmt.Sprintf("%d", w)
|
|
||||||
}
|
|
||||||
if p, ok := f.Precision(); ok {
|
|
||||||
s += fmt.Sprintf(".%d", p)
|
|
||||||
}
|
|
||||||
s += string(c)
|
|
||||||
fmt.Fprintf(f, s, fo.x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fo formatter) Format(f fmt.State, c rune) {
|
|
||||||
if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
|
|
||||||
w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
|
|
||||||
p := &printer{tw: w, Writer: w}
|
|
||||||
p.printValue(reflect.ValueOf(fo.x), true, fo.quote)
|
|
||||||
w.Flush()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fo.passThrough(f, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
type printer struct {
|
|
||||||
io.Writer
|
|
||||||
tw *tabwriter.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *printer) indent() *printer {
|
|
||||||
q := *p
|
|
||||||
q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
|
|
||||||
q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
|
|
||||||
return &q
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
|
|
||||||
if showType {
|
|
||||||
io.WriteString(p, v.Type().String())
|
|
||||||
fmt.Fprintf(p, "(%#v)", x)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(p, "%#v", x)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *printer) printValue(v reflect.Value, showType, quote bool) {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
p.printInline(v, v.Bool(), showType)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
p.printInline(v, v.Int(), showType)
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
p.printInline(v, v.Uint(), showType)
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
p.printInline(v, v.Float(), showType)
|
|
||||||
case reflect.Complex64, reflect.Complex128:
|
|
||||||
fmt.Fprintf(p, "%#v", v.Complex())
|
|
||||||
case reflect.String:
|
|
||||||
p.fmtString(v.String(), quote)
|
|
||||||
case reflect.Map:
|
|
||||||
t := v.Type()
|
|
||||||
if showType {
|
|
||||||
io.WriteString(p, t.String())
|
|
||||||
}
|
|
||||||
writeByte(p, '{')
|
|
||||||
if nonzero(v) {
|
|
||||||
expand := !canInline(v.Type())
|
|
||||||
pp := p
|
|
||||||
if expand {
|
|
||||||
writeByte(p, '\n')
|
|
||||||
pp = p.indent()
|
|
||||||
}
|
|
||||||
keys := v.MapKeys()
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
showTypeInStruct := true
|
|
||||||
k := keys[i]
|
|
||||||
mv := v.MapIndex(k)
|
|
||||||
pp.printValue(k, false, true)
|
|
||||||
writeByte(pp, ':')
|
|
||||||
if expand {
|
|
||||||
writeByte(pp, '\t')
|
|
||||||
}
|
|
||||||
showTypeInStruct = t.Elem().Kind() == reflect.Interface
|
|
||||||
pp.printValue(mv, showTypeInStruct, true)
|
|
||||||
if expand {
|
|
||||||
io.WriteString(pp, ",\n")
|
|
||||||
} else if i < v.Len()-1 {
|
|
||||||
io.WriteString(pp, ", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if expand {
|
|
||||||
pp.tw.Flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
writeByte(p, '}')
|
|
||||||
case reflect.Struct:
|
|
||||||
t := v.Type()
|
|
||||||
if showType {
|
|
||||||
io.WriteString(p, t.String())
|
|
||||||
}
|
|
||||||
writeByte(p, '{')
|
|
||||||
if nonzero(v) {
|
|
||||||
expand := !canInline(v.Type())
|
|
||||||
pp := p
|
|
||||||
if expand {
|
|
||||||
writeByte(p, '\n')
|
|
||||||
pp = p.indent()
|
|
||||||
}
|
|
||||||
for i := 0; i < v.NumField(); i++ {
|
|
||||||
showTypeInStruct := true
|
|
||||||
if f := t.Field(i); f.Name != "" {
|
|
||||||
io.WriteString(pp, f.Name)
|
|
||||||
writeByte(pp, ':')
|
|
||||||
if expand {
|
|
||||||
writeByte(pp, '\t')
|
|
||||||
}
|
|
||||||
showTypeInStruct = f.Type.Kind() == reflect.Interface
|
|
||||||
}
|
|
||||||
pp.printValue(getField(v, i), showTypeInStruct, true)
|
|
||||||
if expand {
|
|
||||||
io.WriteString(pp, ",\n")
|
|
||||||
} else if i < v.NumField()-1 {
|
|
||||||
io.WriteString(pp, ", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if expand {
|
|
||||||
pp.tw.Flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
writeByte(p, '}')
|
|
||||||
case reflect.Interface:
|
|
||||||
switch e := v.Elem(); {
|
|
||||||
case e.Kind() == reflect.Invalid:
|
|
||||||
io.WriteString(p, "nil")
|
|
||||||
case e.IsValid():
|
|
||||||
p.printValue(e, showType, true)
|
|
||||||
default:
|
|
||||||
io.WriteString(p, v.Type().String())
|
|
||||||
io.WriteString(p, "(nil)")
|
|
||||||
}
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
t := v.Type()
|
|
||||||
if showType {
|
|
||||||
io.WriteString(p, t.String())
|
|
||||||
}
|
|
||||||
if v.Kind() == reflect.Slice && v.IsNil() && showType {
|
|
||||||
io.WriteString(p, "(nil)")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if v.Kind() == reflect.Slice && v.IsNil() {
|
|
||||||
io.WriteString(p, "nil")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
writeByte(p, '{')
|
|
||||||
expand := !canInline(v.Type())
|
|
||||||
pp := p
|
|
||||||
if expand {
|
|
||||||
writeByte(p, '\n')
|
|
||||||
pp = p.indent()
|
|
||||||
}
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
showTypeInSlice := t.Elem().Kind() == reflect.Interface
|
|
||||||
pp.printValue(v.Index(i), showTypeInSlice, true)
|
|
||||||
if expand {
|
|
||||||
io.WriteString(pp, ",\n")
|
|
||||||
} else if i < v.Len()-1 {
|
|
||||||
io.WriteString(pp, ", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if expand {
|
|
||||||
pp.tw.Flush()
|
|
||||||
}
|
|
||||||
writeByte(p, '}')
|
|
||||||
case reflect.Ptr:
|
|
||||||
e := v.Elem()
|
|
||||||
if !e.IsValid() {
|
|
||||||
writeByte(p, '(')
|
|
||||||
io.WriteString(p, v.Type().String())
|
|
||||||
io.WriteString(p, ")(nil)")
|
|
||||||
} else {
|
|
||||||
writeByte(p, '&')
|
|
||||||
p.printValue(e, true, true)
|
|
||||||
}
|
|
||||||
case reflect.Chan:
|
|
||||||
x := v.Pointer()
|
|
||||||
if showType {
|
|
||||||
writeByte(p, '(')
|
|
||||||
io.WriteString(p, v.Type().String())
|
|
||||||
fmt.Fprintf(p, ")(%#v)", x)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(p, "%#v", x)
|
|
||||||
}
|
|
||||||
case reflect.Func:
|
|
||||||
io.WriteString(p, v.Type().String())
|
|
||||||
io.WriteString(p, " {...}")
|
|
||||||
case reflect.UnsafePointer:
|
|
||||||
p.printInline(v, v.Pointer(), showType)
|
|
||||||
case reflect.Invalid:
|
|
||||||
io.WriteString(p, "nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func canInline(t reflect.Type) bool {
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Map:
|
|
||||||
return !canExpand(t.Elem())
|
|
||||||
case reflect.Struct:
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
if canExpand(t.Field(i).Type) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
case reflect.Interface:
|
|
||||||
return false
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
return !canExpand(t.Elem())
|
|
||||||
case reflect.Ptr:
|
|
||||||
return false
|
|
||||||
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func canExpand(t reflect.Type) bool {
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Map, reflect.Struct,
|
|
||||||
reflect.Interface, reflect.Array, reflect.Slice,
|
|
||||||
reflect.Ptr:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *printer) fmtString(s string, quote bool) {
|
|
||||||
if quote {
|
|
||||||
s = strconv.Quote(s)
|
|
||||||
}
|
|
||||||
io.WriteString(p, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func tryDeepEqual(a, b interface{}) bool {
|
|
||||||
defer func() { recover() }()
|
|
||||||
return reflect.DeepEqual(a, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeByte(w io.Writer, b byte) {
|
|
||||||
w.Write([]byte{b})
|
|
||||||
}
|
|
||||||
|
|
||||||
func getField(v reflect.Value, i int) reflect.Value {
|
|
||||||
val := v.Field(i)
|
|
||||||
if val.Kind() == reflect.Interface && !val.IsNil() {
|
|
||||||
val = val.Elem()
|
|
||||||
}
|
|
||||||
return val
|
|
||||||
}

@@ -1,98 +0,0 @@
// Package pretty provides pretty-printing for Go values. This is
// useful during debugging, to avoid wrapping long output lines in
// the terminal.
//
// It provides a function, Formatter, that can be used with any
// function that accepts a format string. It also provides
// convenience wrappers for functions in packages fmt and log.
package pretty

import (
	"fmt"
	"io"
	"log"
)

// Errorf is a convenience wrapper for fmt.Errorf.
//
// Calling Errorf(f, x, y) is equivalent to
// fmt.Errorf(f, Formatter(x), Formatter(y)).
func Errorf(format string, a ...interface{}) error {
	return fmt.Errorf(format, wrap(a, false)...)
}

// Fprintf is a convenience wrapper for fmt.Fprintf.
//
// Calling Fprintf(w, f, x, y) is equivalent to
// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
	return fmt.Fprintf(w, format, wrap(a, false)...)
}

// Log is a convenience wrapper for log.Print.
//
// Calling Log(x, y) is equivalent to
// log.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Log(a ...interface{}) {
	log.Print(wrap(a, true)...)
}

// Logf is a convenience wrapper for log.Printf.
//
// Calling Logf(f, x, y) is equivalent to
// log.Printf(f, Formatter(x), Formatter(y)).
func Logf(format string, a ...interface{}) {
	log.Printf(format, wrap(a, false)...)
}

// Logln is a convenience wrapper for log.Println.
//
// Calling Logln(x, y) is equivalent to
// log.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Logln(a ...interface{}) {
	log.Println(wrap(a, true)...)
}

// Print pretty-prints its operands and writes to standard output.
//
// Calling Print(x, y) is equivalent to
// fmt.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Print(a ...interface{}) (n int, errno error) {
	return fmt.Print(wrap(a, true)...)
}

// Printf is a convenience wrapper for fmt.Printf.
//
// Calling Printf(f, x, y) is equivalent to
// fmt.Printf(f, Formatter(x), Formatter(y)).
func Printf(format string, a ...interface{}) (n int, errno error) {
	return fmt.Printf(format, wrap(a, false)...)
}

// Println pretty-prints its operands and writes to standard output.
//
// Calling Println(x, y) is equivalent to
// fmt.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Println(a ...interface{}) (n int, errno error) {
	return fmt.Println(wrap(a, true)...)
}

// Sprintf is a convenience wrapper for fmt.Sprintf.
//
// Calling Sprintf(f, x, y) is equivalent to
// fmt.Sprintf(f, Formatter(x), Formatter(y)).
func Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, wrap(a, false)...)
}

func wrap(a []interface{}, force bool) []interface{} {
	w := make([]interface{}, len(a))
	for i, x := range a {
		w[i] = formatter{x: x, force: force}
	}
	return w
}

@@ -1,41 +0,0 @@
package pretty

import (
	"reflect"
)

func nonzero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Bool:
		return v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() != 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() != 0
	case reflect.Float32, reflect.Float64:
		return v.Float() != 0
	case reflect.Complex64, reflect.Complex128:
		return v.Complex() != complex(0, 0)
	case reflect.String:
		return v.String() != ""
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			if nonzero(getField(v, i)) {
				return true
			}
		}
		return false
	case reflect.Array:
		for i := 0; i < v.Len(); i++ {
			if nonzero(v.Index(i)) {
				return true
			}
		}
		return false
	case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func:
		return !v.IsNil()
	case reflect.UnsafePointer:
		return v.Pointer() != 0
	}
	return true
}

@@ -1,19 +0,0 @@
Copyright 2012 Keith Rarick

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@@ -1,3 +0,0 @@
This is a Go package for manipulating paragraphs of text.

See http://go.pkgdoc.org/github.com/kr/text for full documentation.

@@ -1,3 +0,0 @@
// Package text provides rudimentary functions for manipulating text in
// paragraphs.
package text

@@ -1,74 +0,0 @@
package text

import (
	"io"
)

// Indent inserts prefix at the beginning of each non-empty line of s. The
// end-of-line marker is NL.
func Indent(s, prefix string) string {
	return string(IndentBytes([]byte(s), []byte(prefix)))
}

// IndentBytes inserts prefix at the beginning of each non-empty line of b.
// The end-of-line marker is NL.
func IndentBytes(b, prefix []byte) []byte {
	var res []byte
	bol := true
	for _, c := range b {
		if bol && c != '\n' {
			res = append(res, prefix...)
		}
		res = append(res, c)
		bol = c == '\n'
	}
	return res
}

// Writer indents each line of its input.
type indentWriter struct {
	w   io.Writer
	bol bool
	pre [][]byte
	sel int
	off int
}

// NewIndentWriter makes a new write filter that indents the input
// lines. Each line is prefixed in order with the corresponding
// element of pre. If there are more lines than elements, the last
// element of pre is repeated for each subsequent line.
func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
	return &indentWriter{
		w:   w,
		pre: pre,
		bol: true,
	}
}

// The only errors returned are from the underlying indentWriter.
func (w *indentWriter) Write(p []byte) (n int, err error) {
	for _, c := range p {
		if w.bol {
			var i int
			i, err = w.w.Write(w.pre[w.sel][w.off:])
			w.off += i
			if err != nil {
				return n, err
			}
		}
		_, err = w.w.Write([]byte{c})
		if err != nil {
			return n, err
		}
		n++
		w.bol = c == '\n'
		if w.bol {
			w.off = 0
			if w.sel < len(w.pre)-1 {
				w.sel++
			}
		}
	}
	return n, nil
}

@@ -1,86 +0,0 @@
package text

import (
	"bytes"
	"math"
)

var (
	nl = []byte{'\n'}
	sp = []byte{' '}
)

const defaultPenalty = 1e5

// Wrap wraps s into a paragraph of lines of length lim, with minimal
// raggedness.
func Wrap(s string, lim int) string {
	return string(WrapBytes([]byte(s), lim))
}

// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
// raggedness.
func WrapBytes(b []byte, lim int) []byte {
	words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
	var lines [][]byte
	for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
		lines = append(lines, bytes.Join(line, sp))
	}
	return bytes.Join(lines, nl)
}

// WrapWords is the low-level line-breaking algorithm, useful if you need more
// control over the details of the text wrapping process. For most uses, either
// Wrap or WrapBytes will be sufficient and more convenient.
//
// WrapWords splits a list of words into lines with minimal "raggedness",
// treating each byte as one unit, accounting for spc units between adjacent
// words on each line, and attempting to limit lines to lim units. Raggedness
// is the total error over all lines, where error is the square of the
// difference of the length of the line and lim. Too-long lines (which only
// happen when a single word is longer than lim units) have pen penalty units
// added to the error.
func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
	n := len(words)

	length := make([][]int, n)
	for i := 0; i < n; i++ {
		length[i] = make([]int, n)
		length[i][i] = len(words[i])
		for j := i + 1; j < n; j++ {
			length[i][j] = length[i][j-1] + spc + len(words[j])
		}
	}

	nbrk := make([]int, n)
	cost := make([]int, n)
	for i := range cost {
		cost[i] = math.MaxInt32
	}
	for i := n - 1; i >= 0; i-- {
		if length[i][n-1] <= lim {
			cost[i] = 0
			nbrk[i] = n
		} else {
			for j := i + 1; j < n; j++ {
				d := lim - length[i][j-1]
				c := d*d + cost[j]
				if length[i][j-1] > lim {
					c += pen // too-long lines get a worse penalty
				}
				if c < cost[i] {
					cost[i] = c
					nbrk[i] = j
				}
			}
		}
	}

	var lines [][][]byte
	i := 0
	for i < n {
		lines = append(lines, words[i:nbrk[i]])
		i = nbrk[i]
	}
	return lines
}

@@ -1,25 +0,0 @@
goproperties - properties file decoder for Go

Copyright (c) 2013-2014 - Frank Schroeder

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
@ -1,121 +0,0 @@
|
||||||
Overview [![Build Status](https://travis-ci.org/magiconair/properties.png?branch=master)](https://travis-ci.org/magiconair/properties)
|
|
||||||
========
|
|
||||||
|
|
||||||
properties is a Go library for reading and writing properties files.
|
|
||||||
|
|
||||||
It supports reading from multiple files and Spring style recursive property
|
|
||||||
expansion of expressions like `${key}` to their corresponding value.
|
|
||||||
Value expressions can refer to other keys like in `${key}` or to
|
|
||||||
environment variables like in `${USER}`.
|
|
||||||
Filenames can also contain environment variables like in
|
|
||||||
`/home/${USER}/myapp.properties`.
|
|
||||||
|
|
||||||
Comments and the order of keys are preserved. Comments can be modified
|
|
||||||
and can be written to the output.
|
|
||||||
|
|
||||||
The properties library supports both ISO-8859-1 and UTF-8 encoded data.
|
|
||||||
|
|
||||||
Starting from version 1.3.0 the behavior of the MustXXX() functions is
|
|
||||||
configurable by providing a custom `ErrorHandler` function. The default has
|
|
||||||
changed from `panic` to `log.Fatal` but this is configurable and custom
|
|
||||||
error handling functions can be provided. See the package documentation for
|
|
||||||
details.
|
|
||||||
|
|
||||||
Getting Started
|
|
||||||
---------------
|
|
||||||
|
|
||||||
```go
|
|
||||||
import "github.com/magiconair/properties"
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
|
|
||||||
host := p.MustGetString("host")
|
|
||||||
port := p.GetInt("port", 8080)
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties)
|
|
||||||
|
|
||||||
Installation and Upgrade
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
```
|
|
||||||
$ go get -u github.com/magiconair/properties
|
|
||||||
```
|
|
||||||
|
|
||||||
For testing and debugging you need the [go-check](https://github.com/go-check/check) library
|
|
||||||
|
|
||||||
```
|
|
||||||
$ go get -u gopkg.in/check.v1
|
|
||||||
```
|
|
||||||
|
|
||||||
History
|
|
||||||
-------
|
|
||||||
|
|
||||||
v1.5.3, 02 Jun 2015
|
|
||||||
-------------------
|
|
||||||
* [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
|
|
||||||
|
|
||||||
v1.5.2, 10 Apr 2015
|
|
||||||
-------------------
|
|
||||||
* [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
|
|
||||||
* Add clickable links to README
|
|
||||||
|
|
||||||
v1.5.1, 08 Dec 2014
|
|
||||||
-------------------
|
|
||||||
* Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified in a format compatible with
|
|
||||||
[time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
|
|
||||||
|
|
||||||
v1.5.0, 18 Nov 2014
|
|
||||||
-------------------
|
|
||||||
* Added support for single and multi-line comments (reading, writing and updating)
|
|
||||||
* The order of keys is now preserved
|
|
||||||
* Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
|
|
||||||
* Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
|
|
||||||
* Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
|
|
||||||
|
|
||||||
v1.4.2, 15 Nov 2014
|
|
||||||
-------------------
|
|
||||||
* [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
|
|
||||||
|
|
||||||
v1.4.1, 13 Nov 2014
|
|
||||||
-------------------
|
|
||||||
* [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
|
|
||||||
|
|
||||||
v1.4.0, 23 Sep 2014
|
|
||||||
-------------------
|
|
||||||
* Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
|
|
||||||
* Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
|
|
||||||
|
|
||||||
v1.3.0, 18 Mar 2014
|
|
||||||
-------------------
|
|
||||||
* Added support for time.Duration
|
|
||||||
* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
|
|
||||||
* Changed default of MustXXX() failure from panic to log.Fatal
|
|
||||||
|
|
||||||
v1.2.0, 05 Mar 2014
|
|
||||||
-------------------
|
|
||||||
* Added MustGet... functions
|
|
||||||
* Added support for int and uint with range checks on 32 bit platforms
|
|
||||||
|
|
||||||
v1.1.0, 20 Jan 2014
|
|
||||||
-------------------
|
|
||||||
* Renamed from goproperties to properties
|
|
||||||
* Added support for expansion of environment vars in
|
|
||||||
filenames and value expressions
|
|
||||||
* Fixed bug where value expressions were not at the
|
|
||||||
start of the string
|
|
||||||
|
|
||||||
v1.0.0, 7 Jan 2014
|
|
||||||
------------------
|
|
||||||
* Initial release
|
|
||||||
|
|
||||||
License
|
|
||||||
-------
|
|
||||||
|
|
||||||
2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details.
|
|
||||||
|
|
||||||
ToDo
|
|
||||||
----
|
|
||||||
* Dump contents with passwords and secrets obscured
|
|
|
@ -1,135 +0,0 @@
|
||||||
// Copyright 2013-2014 Frank Schroeder. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package properties provides functions for reading and writing
|
|
||||||
// ISO-8859-1 and UTF-8 encoded .properties files and has
|
|
||||||
// support for recursive property expansion.
|
|
||||||
//
|
|
||||||
// Java properties files are ISO-8859-1 encoded and use Unicode
|
|
||||||
// literals for characters outside the ISO character set. Unicode
|
|
||||||
// literals can be used in UTF-8 encoded properties files but
|
|
||||||
// aren't necessary.
|
|
||||||
//
|
|
||||||
// To load a single properties file use MustLoadFile():
|
|
||||||
//
|
|
||||||
// p := properties.MustLoadFile(filename, properties.UTF8)
|
|
||||||
//
|
|
||||||
// To load multiple properties files use MustLoadFiles()
|
|
||||||
// which loads the files in the given order and merges the
|
|
||||||
// result. Missing properties files can be ignored if the
|
|
||||||
// 'ignoreMissing' flag is set to true.
|
|
||||||
//
|
|
||||||
// Filenames can contain environment variables which are expanded
|
|
||||||
// before loading.
|
|
||||||
//
|
|
||||||
// f1 := "/etc/myapp/myapp.conf"
|
|
||||||
// f2 := "/home/${USER}/myapp.conf"
|
|
||||||
// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true)
|
|
||||||
//
|
|
||||||
// All of the different key/value delimiters ' ', ':' and '=' are
|
|
||||||
// supported as well as the comment characters '!' and '#' and
|
|
||||||
// multi-line values.
|
|
||||||
//
|
|
||||||
// ! this is a comment
|
|
||||||
// # and so is this
|
|
||||||
//
|
|
||||||
// # the following expressions are equal
|
|
||||||
// key value
|
|
||||||
// key=value
|
|
||||||
// key:value
|
|
||||||
// key = value
|
|
||||||
// key : value
|
|
||||||
// key = val\
|
|
||||||
// ue
|
|
||||||
//
|
|
||||||
// Properties stores all comments preceding a key and provides
|
|
||||||
// GetComments() and SetComments() methods to retrieve and
|
|
||||||
// update them. The convenience functions GetComment() and
|
|
||||||
// SetComment() allow access to the last comment. The
|
|
||||||
// WriteComment() method writes properties files including
|
|
||||||
// the comments and with the keys in the original order.
|
|
||||||
// This can be used for sanitizing properties files.
|
|
||||||
//
|
|
||||||
// Property expansion is recursive and circular references
|
|
||||||
// and malformed expressions are not allowed and cause an
|
|
||||||
// error. Expansion of environment variables is supported.
|
|
||||||
//
|
|
||||||
// # standard property
|
|
||||||
// key = value
|
|
||||||
//
|
|
||||||
// # property expansion: key2 = value
|
|
||||||
// key2 = ${key}
|
|
||||||
//
|
|
||||||
// # recursive expansion: key3 = value
|
|
||||||
// key3 = ${key2}
|
|
||||||
//
|
|
||||||
// # circular reference (error)
|
|
||||||
// key = ${key}
|
|
||||||
//
|
|
||||||
// # malformed expression (error)
|
|
||||||
// key = ${ke
|
|
||||||
//
|
|
||||||
// # refers to the user's home dir
|
|
||||||
// home = ${HOME}
|
|
||||||
//
|
|
||||||
// # local key takes precedence over env var: u = foo
|
|
||||||
// USER = foo
|
|
||||||
// u = ${USER}
|
|
||||||
//
|
|
||||||
// The default property expansion format is ${key} but can be
|
|
||||||
// changed by setting different pre- and postfix values on the
|
|
||||||
// Properties object.
|
|
||||||
//
|
|
||||||
// p := properties.NewProperties()
|
|
||||||
// p.Prefix = "#["
|
|
||||||
// p.Postfix = "]#"
|
|
||||||
//
|
|
||||||
// Properties provides convenience functions for getting typed
|
|
||||||
// values with default values if the key does not exist or the
|
|
||||||
// type conversion failed.
|
|
||||||
//
|
|
||||||
// # Returns true if the value is either "1", "on", "yes" or "true"
|
|
||||||
// # Returns false for every other value and the default value if
|
|
||||||
// # the key does not exist.
|
|
||||||
// v = p.GetBool("key", false)
|
|
||||||
//
|
|
||||||
// # Returns the value if the key exists and the format conversion
|
|
||||||
// # was successful. Otherwise, the default value is returned.
|
|
||||||
// v = p.GetInt64("key", 999)
|
|
||||||
// v = p.GetUint64("key", 999)
|
|
||||||
// v = p.GetFloat64("key", 123.0)
|
|
||||||
// v = p.GetString("key", "def")
|
|
||||||
// v = p.GetDuration("key", 999)
|
|
||||||
//
|
|
||||||
// Properties provides several MustXXX() convenience functions
|
|
||||||
// which will terminate the app if an error occurs. The behavior
|
|
||||||
// of the failure is configurable and the default is to call
|
|
||||||
// log.Fatal(err). To have the MustXXX() functions panic instead
|
|
||||||
// of logging the error set a different ErrorHandler before
|
|
||||||
// you use the Properties package.
|
|
||||||
//
|
|
||||||
// properties.ErrorHandler = properties.PanicHandler
|
|
||||||
//
|
|
||||||
// # Will panic instead of logging an error
|
|
||||||
// p := properties.MustLoadFile("config.properties")
|
|
||||||
//
|
|
||||||
// You can also provide your own ErrorHandler function. The only requirement
|
|
||||||
// is that the error handler function must exit after handling the error.
|
|
||||||
//
|
|
||||||
// properties.ErrorHandler = func(err error) {
|
|
||||||
// fmt.Println(err)
|
|
||||||
// os.Exit(1)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// # Will write to stdout and then exit
|
|
||||||
// p := properties.MustLoadFile("config.properties")
|
|
||||||
//
|
|
||||||
// The following documents provide a description of the properties
|
|
||||||
// file format.
|
|
||||||
//
|
|
||||||
// http://en.wikipedia.org/wiki/.properties
|
|
||||||
//
|
|
||||||
// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29
|
|
||||||
//
|
|
||||||
package properties
|
|
|
@ -1,409 +0,0 @@
|
||||||
// Copyright 2013-2014 Frank Schroeder. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
//
|
|
||||||
// Parts of the lexer are from the template/text/parser package
|
|
||||||
// For these parts the following applies:
|
|
||||||
//
|
|
||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file of the go 1.2
|
|
||||||
// distribution.
|
|
||||||
|
|
||||||
package properties
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// item represents a token or text string returned from the scanner.
|
|
||||||
type item struct {
|
|
||||||
typ itemType // The type of this item.
|
|
||||||
pos int // The starting position, in bytes, of this item in the input string.
|
|
||||||
val string // The value of this item.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i item) String() string {
|
|
||||||
switch {
|
|
||||||
case i.typ == itemEOF:
|
|
||||||
return "EOF"
|
|
||||||
case i.typ == itemError:
|
|
||||||
return i.val
|
|
||||||
case len(i.val) > 10:
|
|
||||||
return fmt.Sprintf("%.10q...", i.val)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%q", i.val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// itemType identifies the type of lex items.
|
|
||||||
type itemType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
itemError itemType = iota // error occurred; value is text of error
|
|
||||||
itemEOF
|
|
||||||
itemKey // a key
|
|
||||||
itemValue // a value
|
|
||||||
itemComment // a comment
|
|
||||||
)
|
|
||||||
|
|
||||||
// defines a constant for EOF
|
|
||||||
const eof = -1
|
|
||||||
|
|
||||||
// permitted whitespace characters space, FF and TAB
|
|
||||||
const whitespace = " \f\t"
|
|
||||||
|
|
||||||
// stateFn represents the state of the scanner as a function that returns the next state.
|
|
||||||
type stateFn func(*lexer) stateFn
|
|
||||||
|
|
||||||
// lexer holds the state of the scanner.
|
|
||||||
type lexer struct {
|
|
||||||
input string // the string being scanned
|
|
||||||
state stateFn // the next lexing function to enter
|
|
||||||
pos int // current position in the input
|
|
||||||
start int // start position of this item
|
|
||||||
width int // width of last rune read from input
|
|
||||||
lastPos int // position of most recent item returned by nextItem
|
|
||||||
runes []rune // scanned runes for this item
|
|
||||||
items chan item // channel of scanned items
|
|
||||||
}
|
|
||||||
|
|
||||||
// next returns the next rune in the input.
|
|
||||||
func (l *lexer) next() rune {
|
|
||||||
if int(l.pos) >= len(l.input) {
|
|
||||||
l.width = 0
|
|
||||||
return eof
|
|
||||||
}
|
|
||||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
|
||||||
l.width = w
|
|
||||||
l.pos += l.width
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// peek returns but does not consume the next rune in the input.
|
|
||||||
func (l *lexer) peek() rune {
|
|
||||||
r := l.next()
|
|
||||||
l.backup()
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// backup steps back one rune. Can only be called once per call of next.
|
|
||||||
func (l *lexer) backup() {
|
|
||||||
l.pos -= l.width
|
|
||||||
}
|
|
||||||
|
|
||||||
// emit passes an item back to the client.
|
|
||||||
func (l *lexer) emit(t itemType) {
|
|
||||||
item := item{t, l.start, string(l.runes)}
|
|
||||||
l.items <- item
|
|
||||||
l.start = l.pos
|
|
||||||
l.runes = l.runes[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// ignore skips over the pending input before this point.
|
|
||||||
func (l *lexer) ignore() {
|
|
||||||
l.start = l.pos
|
|
||||||
}
|
|
||||||
|
|
||||||
// appends the rune to the current value
|
|
||||||
func (l *lexer) appendRune(r rune) {
|
|
||||||
l.runes = append(l.runes, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// accept consumes the next rune if it's from the valid set.
|
|
||||||
func (l *lexer) accept(valid string) bool {
|
|
||||||
if strings.IndexRune(valid, l.next()) >= 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
l.backup()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// acceptRun consumes a run of runes from the valid set.
|
|
||||||
func (l *lexer) acceptRun(valid string) {
|
|
||||||
for strings.IndexRune(valid, l.next()) >= 0 {
|
|
||||||
}
|
|
||||||
l.backup()
|
|
||||||
}
|
|
||||||
|
|
||||||
// acceptRunUntil consumes a run of runes up to a terminator.
|
|
||||||
func (l *lexer) acceptRunUntil(term rune) {
|
|
||||||
for term != l.next() {
|
|
||||||
}
|
|
||||||
l.backup()
|
|
||||||
}
|
|
||||||
|
|
||||||
// isNotEmpty reports whether the current parsed text is not empty.
|
|
||||||
func (l *lexer) isNotEmpty() bool {
|
|
||||||
return l.pos > l.start
|
|
||||||
}
|
|
||||||
|
|
||||||
// lineNumber reports which line we're on, based on the position of
|
|
||||||
// the previous item returned by nextItem. Doing it this way
|
|
||||||
// means we don't have to worry about peek double counting.
|
|
||||||
func (l *lexer) lineNumber() int {
|
|
||||||
return 1 + strings.Count(l.input[:l.lastPos], "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// errorf returns an error token and terminates the scan by passing
|
|
||||||
// back a nil pointer that will be the next state, terminating l.nextItem.
|
|
||||||
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
|
|
||||||
l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nextItem returns the next item from the input.
|
|
||||||
func (l *lexer) nextItem() item {
|
|
||||||
item := <-l.items
|
|
||||||
l.lastPos = item.pos
|
|
||||||
return item
|
|
||||||
}
|
|
||||||
|
|
||||||
// lex creates a new scanner for the input string.
|
|
||||||
func lex(input string) *lexer {
|
|
||||||
l := &lexer{
|
|
||||||
input: input,
|
|
||||||
items: make(chan item),
|
|
||||||
runes: make([]rune, 0, 32),
|
|
||||||
}
|
|
||||||
go l.run()
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// run runs the state machine for the lexer.
|
|
||||||
func (l *lexer) run() {
|
|
||||||
for l.state = lexBeforeKey(l); l.state != nil; {
|
|
||||||
l.state = l.state(l)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// state functions
|
|
||||||
|
|
||||||
// lexBeforeKey scans until a key begins.
|
|
||||||
func lexBeforeKey(l *lexer) stateFn {
|
|
||||||
switch r := l.next(); {
|
|
||||||
case isEOF(r):
|
|
||||||
l.emit(itemEOF)
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case isEOL(r):
|
|
||||||
l.ignore()
|
|
||||||
return lexBeforeKey
|
|
||||||
|
|
||||||
case isComment(r):
|
|
||||||
return lexComment
|
|
||||||
|
|
||||||
case isWhitespace(r):
|
|
||||||
l.acceptRun(whitespace)
|
|
||||||
l.ignore()
|
|
||||||
return lexKey
|
|
||||||
|
|
||||||
default:
|
|
||||||
l.backup()
|
|
||||||
return lexKey
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexComment scans a comment line. The comment character has already been scanned.
|
|
||||||
func lexComment(l *lexer) stateFn {
|
|
||||||
l.acceptRun(whitespace)
|
|
||||||
l.ignore()
|
|
||||||
for {
|
|
||||||
switch r := l.next(); {
|
|
||||||
case isEOF(r):
|
|
||||||
l.ignore()
|
|
||||||
l.emit(itemEOF)
|
|
||||||
return nil
|
|
||||||
case isEOL(r):
|
|
||||||
l.emit(itemComment)
|
|
||||||
return lexBeforeKey
|
|
||||||
default:
|
|
||||||
l.appendRune(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexKey scans the key up to a delimiter
|
|
||||||
func lexKey(l *lexer) stateFn {
|
|
||||||
var r rune
|
|
||||||
|
|
||||||
Loop:
|
|
||||||
for {
|
|
||||||
switch r = l.next(); {
|
|
||||||
|
|
||||||
case isEscape(r):
|
|
||||||
err := l.scanEscapeSequence()
|
|
||||||
if err != nil {
|
|
||||||
return l.errorf(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
case isEndOfKey(r):
|
|
||||||
l.backup()
|
|
||||||
break Loop
|
|
||||||
|
|
||||||
case isEOF(r):
|
|
||||||
break Loop
|
|
||||||
|
|
||||||
default:
|
|
||||||
l.appendRune(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(l.runes) > 0 {
|
|
||||||
l.emit(itemKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
if isEOF(r) {
|
|
||||||
l.emit(itemEOF)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return lexBeforeValue
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexBeforeValue scans the delimiter between key and value.
|
|
||||||
// Leading and trailing whitespace is ignored.
|
|
||||||
// We expect to be just after the key.
|
|
||||||
func lexBeforeValue(l *lexer) stateFn {
|
|
||||||
l.acceptRun(whitespace)
|
|
||||||
l.accept(":=")
|
|
||||||
l.acceptRun(whitespace)
|
|
||||||
l.ignore()
|
|
||||||
return lexValue
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexValue scans text until the end of the line. We expect to be just after the delimiter.
|
|
||||||
func lexValue(l *lexer) stateFn {
|
|
||||||
for {
|
|
||||||
switch r := l.next(); {
|
|
||||||
case isEscape(r):
|
|
||||||
r := l.peek()
|
|
||||||
if isEOL(r) {
|
|
||||||
l.next()
|
|
||||||
l.acceptRun(whitespace)
|
|
||||||
} else {
|
|
||||||
err := l.scanEscapeSequence()
|
|
||||||
if err != nil {
|
|
||||||
return l.errorf(err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case isEOL(r):
|
|
||||||
l.emit(itemValue)
|
|
||||||
l.ignore()
|
|
||||||
return lexBeforeKey
|
|
||||||
|
|
||||||
case isEOF(r):
|
|
||||||
l.emit(itemValue)
|
|
||||||
l.emit(itemEOF)
|
|
||||||
return nil
|
|
||||||
|
|
||||||
default:
|
|
||||||
l.appendRune(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanEscapeSequence scans either one of the escaped characters
|
|
||||||
// or a unicode literal. We expect to be after the escape character.
|
|
||||||
func (l *lexer) scanEscapeSequence() error {
|
|
||||||
switch r := l.next(); {
|
|
||||||
|
|
||||||
case isEscapedCharacter(r):
|
|
||||||
l.appendRune(decodeEscapedCharacter(r))
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case atUnicodeLiteral(r):
|
|
||||||
return l.scanUnicodeLiteral()
|
|
||||||
|
|
||||||
case isEOF(r):
|
|
||||||
return fmt.Errorf("premature EOF")
|
|
||||||
|
|
||||||
// silently drop the escape character and append the rune as is
|
|
||||||
default:
|
|
||||||
l.appendRune(r)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// scans a unicode literal in the form \uXXXX. We expect to be after the \u.
|
|
||||||
func (l *lexer) scanUnicodeLiteral() error {
|
|
||||||
// scan the digits
|
|
||||||
d := make([]rune, 4)
|
|
||||||
for i := 0; i < 4; i++ {
|
|
||||||
d[i] = l.next()
|
|
||||||
if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) {
|
|
||||||
return fmt.Errorf("invalid unicode literal")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// decode the digits into a rune
|
|
||||||
r, err := strconv.ParseInt(string(d), 16, 0)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
l.appendRune(rune(r))
|
|
||||||
return nil
|
|
||||||
}
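Editor's illustration, not part of the original diff: a standalone sketch of the \uXXXX decoding step that scanUnicodeLiteral above performs, using the same strconv.ParseInt call on the four hex digits.

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// The four hex digits after \u are parsed as a code point and turned
	// back into a rune, mirroring the lexer's scanUnicodeLiteral.
	digits := "00e9"
	cp, err := strconv.ParseInt(digits, 16, 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("\\u%s -> %c\n", digits, rune(cp)) // \u00e9 -> é
}
```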
|
|
||||||
|
|
||||||
// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character.
|
|
||||||
func decodeEscapedCharacter(r rune) rune {
|
|
||||||
switch r {
|
|
||||||
case 'f':
|
|
||||||
return '\f'
|
|
||||||
case 'n':
|
|
||||||
return '\n'
|
|
||||||
case 'r':
|
|
||||||
return '\r'
|
|
||||||
case 't':
|
|
||||||
return '\t'
|
|
||||||
default:
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// atUnicodeLiteral reports whether we are at a unicode literal.
|
|
||||||
// The escape character has already been consumed.
|
|
||||||
func atUnicodeLiteral(r rune) bool {
|
|
||||||
return r == 'u'
|
|
||||||
}
|
|
||||||
|
|
||||||
// isComment reports whether we are at the start of a comment.
|
|
||||||
func isComment(r rune) bool {
|
|
||||||
return r == '#' || r == '!'
|
|
||||||
}
|
|
||||||
|
|
||||||
// isEndOfKey reports whether the rune terminates the current key.
|
|
||||||
func isEndOfKey(r rune) bool {
|
|
||||||
return strings.ContainsRune(" \f\t\r\n:=", r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// isEOF reports whether we are at EOF.
|
|
||||||
func isEOF(r rune) bool {
|
|
||||||
return r == eof
|
|
||||||
}
|
|
||||||
|
|
||||||
// isEOL reports whether we are at a new line character.
|
|
||||||
func isEOL(r rune) bool {
|
|
||||||
return r == '\n' || r == '\r'
|
|
||||||
}
|
|
||||||
|
|
||||||
// isEscape reports whether the rune is the escape character which
|
|
||||||
// prefixes unicode literals and other escaped characters.
|
|
||||||
func isEscape(r rune) bool {
|
|
||||||
return r == '\\'
|
|
||||||
}
|
|
||||||
|
|
||||||
// isEscapedCharacter reports whether we are at one of the characters that need escaping.
|
|
||||||
// The escape character has already been consumed.
|
|
||||||
func isEscapedCharacter(r rune) bool {
|
|
||||||
return strings.ContainsRune(" :=fnrt", r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// isWhitespace reports whether the rune is a whitespace character.
|
|
||||||
func isWhitespace(r rune) bool {
|
|
||||||
return strings.ContainsRune(whitespace, r)
|
|
||||||
}
|
|
|
@ -1,124 +0,0 @@
// Copyright 2013-2014 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package properties

import (
	"fmt"
	"io/ioutil"
	"os"
)

// Encoding specifies encoding of the input data.
type Encoding uint

const (
	// UTF8 interprets the input data as UTF-8.
	UTF8 Encoding = 1 << iota

	// ISO_8859_1 interprets the input data as ISO-8859-1.
	ISO_8859_1
)

// Load reads a buffer into a Properties struct.
func Load(buf []byte, enc Encoding) (*Properties, error) {
	return loadBuf(buf, enc)
}

// LoadFile reads a file into a Properties struct.
func LoadFile(filename string, enc Encoding) (*Properties, error) {
	return loadFiles([]string{filename}, enc, false)
}

// LoadFiles reads multiple files in the given order into
// a Properties struct. If 'ignoreMissing' is true then
// non-existent files will not be reported as an error.
func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
	return loadFiles(filenames, enc, ignoreMissing)
}

// MustLoadFile reads a file into a Properties struct and
// panics on error.
func MustLoadFile(filename string, enc Encoding) *Properties {
	return mustLoadFiles([]string{filename}, enc, false)
}

// MustLoadFiles reads multiple files in the given order into
// a Properties struct and panics on error. If 'ignoreMissing'
// is true then non-existent files will not be reported as an error.
func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties {
	return mustLoadFiles(filenames, enc, ignoreMissing)
}

// ----------------------------------------------------------------------------

func loadBuf(buf []byte, enc Encoding) (*Properties, error) {
	p, err := parse(convert(buf, enc))
	if err != nil {
		return nil, err
	}

	return p, p.check()
}

func loadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
	buff := make([]byte, 0, 4096)

	for _, filename := range filenames {
		f, err := expandFilename(filename)
		if err != nil {
			return nil, err
		}

		buf, err := ioutil.ReadFile(f)
		if err != nil {
			if ignoreMissing && os.IsNotExist(err) {
				// TODO(frank): should we log that we are skipping the file?
				continue
			}
			return nil, err
		}

		// concatenate the buffers and add a new line in case
		// the previous file didn't end with a new line
		buff = append(append(buff, buf...), '\n')
	}

	return loadBuf(buff, enc)
}

func mustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties {
	p, err := loadFiles(filenames, enc, ignoreMissing)
	if err != nil {
		ErrorHandler(err)
	}
	return p
}

// expandFilename expands ${ENV_VAR} expressions in a filename.
// If the environment variable does not exist then it will be replaced
// with an empty string. Malformed expressions like "${ENV_VAR" will
// be reported as an error.
func expandFilename(filename string) (string, error) {
	return expand(filename, make(map[string]bool), "${", "}", make(map[string]string))
}

// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string.
// For ISO-8859-1 we can convert each byte straight into a rune since the
// first 256 unicode code points cover ISO-8859-1.
func convert(buf []byte, enc Encoding) string {
	switch enc {
	case UTF8:
		return string(buf)
	case ISO_8859_1:
		runes := make([]rune, len(buf))
		for i, b := range buf {
			runes[i] = rune(b)
		}
		return string(runes)
	default:
		ErrorHandler(fmt.Errorf("unsupported encoding %v", enc))
	}
	panic("ErrorHandler should exit")
}
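Editor's illustration, not part of the original diff: a self-contained demonstration of the byte-to-rune conversion that convert() above relies on, namely that the first 256 Unicode code points coincide with ISO-8859-1.

```go
package main

import "fmt"

func main() {
	// ISO-8859-1 byte 0xE9 is 'é'; casting byte -> rune yields the same
	// code point, which is why the decoder above can convert directly.
	b := []byte{0xE9}
	runes := make([]rune, len(b))
	for i, c := range b {
		runes[i] = rune(c)
	}
	fmt.Println(string(runes)) // é
}
```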
@ -1,95 +0,0 @@
// Copyright 2013-2014 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package properties

import (
	"fmt"
	"runtime"
)

type parser struct {
	lex *lexer
}

func parse(input string) (properties *Properties, err error) {
	p := &parser{lex: lex(input)}
	defer p.recover(&err)

	properties = NewProperties()
	key := ""
	comments := []string{}

	for {
		token := p.expectOneOf(itemComment, itemKey, itemEOF)
		switch token.typ {
		case itemEOF:
			goto done
		case itemComment:
			comments = append(comments, token.val)
			continue
		case itemKey:
			key = token.val
			if _, ok := properties.m[key]; !ok {
				properties.k = append(properties.k, key)
			}
		}

		token = p.expectOneOf(itemValue, itemEOF)
		if len(comments) > 0 {
			properties.c[key] = comments
			comments = []string{}
		}
		switch token.typ {
		case itemEOF:
			properties.m[key] = ""
			goto done
		case itemValue:
			properties.m[key] = token.val
		}
	}

done:
	return properties, nil
}

func (p *parser) errorf(format string, args ...interface{}) {
	format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format)
	panic(fmt.Errorf(format, args...))
}

func (p *parser) expect(expected itemType) (token item) {
	token = p.lex.nextItem()
	if token.typ != expected {
		p.unexpected(token)
	}
	return token
}

func (p *parser) expectOneOf(expected ...itemType) (token item) {
	token = p.lex.nextItem()
	for _, v := range expected {
		if token.typ == v {
			return token
		}
	}
	p.unexpected(token)
	panic("unexpected token")
}

func (p *parser) unexpected(token item) {
	p.errorf(token.String())
}

// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
	e := recover()
	if e != nil {
		if _, ok := e.(runtime.Error); ok {
			panic(e)
		}
		*errp = e.(error)
	}
	return
}
@ -1,698 +0,0 @@
|
||||||
// Copyright 2013-2014 Frank Schroeder. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package properties
|
|
||||||
|
|
||||||
// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer.
|
|
||||||
// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrorHandlerFunc defines the type of function which handles failures
|
|
||||||
// of the MustXXX() functions. An error handler function must exit
|
|
||||||
// the application after handling the error.
|
|
||||||
type ErrorHandlerFunc func(error)
|
|
||||||
|
|
||||||
// ErrorHandler is the function which handles failures of the MustXXX()
|
|
||||||
// functions. The default is LogFatalHandler.
|
|
||||||
var ErrorHandler = LogFatalHandler
|
|
||||||
|
|
||||||
// LogFatalHandler handles the error by logging a fatal error and exiting.
|
|
||||||
func LogFatalHandler(err error) {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PanicHandler handles the error by panicking.
|
|
||||||
func PanicHandler(err error) {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// -----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// A Properties contains the key/value pairs from the properties input.
|
|
||||||
// All values are stored in unexpanded form and are expanded at runtime.
|
|
||||||
type Properties struct {
|
|
||||||
// Pre-/Postfix for property expansion.
|
|
||||||
Prefix string
|
|
||||||
Postfix string
|
|
||||||
|
|
||||||
// Stores the key/value pairs
|
|
||||||
m map[string]string
|
|
||||||
|
|
||||||
// Stores the comments per key.
|
|
||||||
c map[string][]string
|
|
||||||
|
|
||||||
// Stores the keys in order of appearance.
|
|
||||||
k []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewProperties creates a new Properties struct with the default
|
|
||||||
// configuration for "${key}" expressions.
|
|
||||||
func NewProperties() *Properties {
|
|
||||||
return &Properties{
|
|
||||||
Prefix: "${",
|
|
||||||
Postfix: "}",
|
|
||||||
m: map[string]string{},
|
|
||||||
c: map[string][]string{},
|
|
||||||
k: []string{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the expanded value for the given key if it exists.
|
|
||||||
// Otherwise, ok is false.
|
|
||||||
func (p *Properties) Get(key string) (value string, ok bool) {
|
|
||||||
v, ok := p.m[key]
|
|
||||||
if !ok {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
expanded, err := p.expand(v)
|
|
||||||
|
|
||||||
// we guarantee that the expanded value is free of
|
|
||||||
// circular references and malformed expressions
|
|
||||||
// so we panic if we still get an error here.
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v))
|
|
||||||
}
|
|
||||||
|
|
||||||
return expanded, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGet returns the expanded value for the given key if it exists.
|
|
||||||
// Otherwise, it panics.
|
|
||||||
func (p *Properties) MustGet(key string) string {
|
|
||||||
if v, ok := p.Get(key); ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
ErrorHandler(invalidKeyError(key))
|
|
||||||
panic("ErrorHandler should exit")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// ClearComments removes the comments for all keys.
|
|
||||||
func (p *Properties) ClearComments() {
|
|
||||||
p.c = map[string][]string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetComment returns the last comment before the given key or an empty string.
|
|
||||||
func (p *Properties) GetComment(key string) string {
|
|
||||||
comments, ok := p.c[key]
|
|
||||||
if !ok || len(comments) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return comments[len(comments)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetComments returns all comments that appeared before the given key or nil.
|
|
||||||
func (p *Properties) GetComments(key string) []string {
|
|
||||||
if comments, ok := p.c[key]; ok {
|
|
||||||
return comments
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// SetComment sets the comment for the key.
|
|
||||||
func (p *Properties) SetComment(key, comment string) {
|
|
||||||
p.c[key] = []string{comment}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// SetComments sets the comments for the key. If the comments are nil then
|
|
||||||
// all comments for this key are deleted.
|
|
||||||
func (p *Properties) SetComments(key string, comments []string) {
|
|
||||||
if comments == nil {
|
|
||||||
delete(p.c, key)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.c[key] = comments
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetBool checks if the expanded value is one of '1', 'yes',
|
|
||||||
// 'true' or 'on' if the key exists. The comparison is case-insensitive.
|
|
||||||
// If the key does not exist the default value is returned.
|
|
||||||
func (p *Properties) GetBool(key string, def bool) bool {
|
|
||||||
v, err := p.getBool(key)
|
|
||||||
if err != nil {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGetBool checks if the expanded value is one of '1', 'yes',
|
|
||||||
// 'true' or 'on' if the key exists. The comparison is case-insensitive.
|
|
||||||
// If the key does not exist the function panics.
|
|
||||||
func (p *Properties) MustGetBool(key string) bool {
|
|
||||||
v, err := p.getBool(key)
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Properties) getBool(key string) (value bool, err error) {
|
|
||||||
if v, ok := p.Get(key); ok {
|
|
||||||
v = strings.ToLower(v)
|
|
||||||
return v == "1" || v == "true" || v == "yes" || v == "on", nil
|
|
||||||
}
|
|
||||||
return false, invalidKeyError(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetDuration parses the expanded value as a time.Duration (in ns) if the
|
|
||||||
// key exists. If key does not exist or the value cannot be parsed the default
|
|
||||||
// value is returned. In almost all cases you want to use GetParsedDuration().
|
|
||||||
func (p *Properties) GetDuration(key string, def time.Duration) time.Duration {
|
|
||||||
v, err := p.getInt64(key)
|
|
||||||
if err != nil {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
return time.Duration(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGetDuration parses the expanded value as a time.Duration (in ns) if
|
|
||||||
// the key exists. If key does not exist or the value cannot be parsed the
|
|
||||||
// function panics. In almost all cases you want to use MustGetParsedDuration().
|
|
||||||
func (p *Properties) MustGetDuration(key string) time.Duration {
|
|
||||||
v, err := p.getInt64(key)
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(err)
|
|
||||||
}
|
|
||||||
return time.Duration(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the default
|
|
||||||
// value is returned.
|
|
||||||
func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration {
|
|
||||||
s, ok := p.Get(key)
|
|
||||||
if !ok {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
v, err := time.ParseDuration(s)
|
|
||||||
if err != nil {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the function panics.
|
|
||||||
func (p *Properties) MustGetParsedDuration(key string) time.Duration {
|
|
||||||
s, ok := p.Get(key)
|
|
||||||
if !ok {
|
|
||||||
ErrorHandler(invalidKeyError(key))
|
|
||||||
}
|
|
||||||
v, err := time.ParseDuration(s)
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetFloat64 parses the expanded value as a float64 if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the default
|
|
||||||
// value is returned.
|
|
||||||
func (p *Properties) GetFloat64(key string, def float64) float64 {
|
|
||||||
v, err := p.getFloat64(key)
|
|
||||||
if err != nil {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGetFloat64 parses the expanded value as a float64 if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the function panics.
|
|
||||||
func (p *Properties) MustGetFloat64(key string) float64 {
|
|
||||||
v, err := p.getFloat64(key)
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Properties) getFloat64(key string) (value float64, err error) {
|
|
||||||
if v, ok := p.Get(key); ok {
|
|
||||||
value, err = strconv.ParseFloat(v, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return value, nil
|
|
||||||
}
|
|
||||||
return 0, invalidKeyError(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetInt parses the expanded value as an int if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the default
|
|
||||||
// value is returned. If the value does not fit into an int the
|
|
||||||
// function panics with an out of range error.
|
|
||||||
func (p *Properties) GetInt(key string, def int) int {
|
|
||||||
v, err := p.getInt64(key)
|
|
||||||
if err != nil {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
return intRangeCheck(key, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGetInt parses the expanded value as an int if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the function panics.
|
|
||||||
// If the value does not fit into an int the function panics with
|
|
||||||
// an out of range error.
|
|
||||||
func (p *Properties) MustGetInt(key string) int {
|
|
||||||
v, err := p.getInt64(key)
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(err)
|
|
||||||
}
|
|
||||||
return intRangeCheck(key, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetInt64 parses the expanded value as an int64 if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the default
|
|
||||||
// value is returned.
|
|
||||||
func (p *Properties) GetInt64(key string, def int64) int64 {
|
|
||||||
v, err := p.getInt64(key)
|
|
||||||
if err != nil {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGetInt64 parses the expanded value as an int64 if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the function panics.
|
|
||||||
func (p *Properties) MustGetInt64(key string) int64 {
|
|
||||||
v, err := p.getInt64(key)
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Properties) getInt64(key string) (value int64, err error) {
|
|
||||||
if v, ok := p.Get(key); ok {
|
|
||||||
value, err = strconv.ParseInt(v, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return value, nil
|
|
||||||
}
|
|
||||||
return 0, invalidKeyError(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetUint parses the expanded value as an uint if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the default
|
|
||||||
// value is returned. If the value does not fit into an int the
|
|
||||||
// function panics with an out of range error.
|
|
||||||
func (p *Properties) GetUint(key string, def uint) uint {
|
|
||||||
v, err := p.getUint64(key)
|
|
||||||
if err != nil {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
return uintRangeCheck(key, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGetUint parses the expanded value as a uint if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the function panics.
|
|
||||||
// If the value does not fit into an int the function panics with
|
|
||||||
// an out of range error.
|
|
||||||
func (p *Properties) MustGetUint(key string) uint {
|
|
||||||
v, err := p.getUint64(key)
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(err)
|
|
||||||
}
|
|
||||||
return uintRangeCheck(key, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetUint64 parses the expanded value as an uint64 if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the default
|
|
||||||
// value is returned.
|
|
||||||
func (p *Properties) GetUint64(key string, def uint64) uint64 {
|
|
||||||
v, err := p.getUint64(key)
|
|
||||||
if err != nil {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGetUint64 parses the expanded value as a uint64 if the key exists.
|
|
||||||
// If key does not exist or the value cannot be parsed the function panics.
|
|
||||||
func (p *Properties) MustGetUint64(key string) uint64 {
|
|
||||||
v, err := p.getUint64(key)
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(err)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Properties) getUint64(key string) (value uint64, err error) {
|
|
||||||
if v, ok := p.Get(key); ok {
|
|
||||||
value, err = strconv.ParseUint(v, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return value, nil
|
|
||||||
}
|
|
||||||
return 0, invalidKeyError(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// GetString returns the expanded value for the given key if it exists or
|
|
||||||
// the default value otherwise.
|
|
||||||
func (p *Properties) GetString(key, def string) string {
|
|
||||||
if v, ok := p.Get(key); ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustGetString returns the expanded value for the given key if it exists or
|
|
||||||
// panics otherwise.
|
|
||||||
func (p *Properties) MustGetString(key string) string {
|
|
||||||
if v, ok := p.Get(key); ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
ErrorHandler(invalidKeyError(key))
|
|
||||||
panic("ErrorHandler should exit")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// Filter returns a new properties object which contains all properties
|
|
||||||
// for which the key matches the pattern.
|
|
||||||
func (p *Properties) Filter(pattern string) (*Properties, error) {
|
|
||||||
re, err := regexp.Compile(pattern)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return p.FilterRegexp(re), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterRegexp returns a new properties object which contains all properties
|
|
||||||
// for which the key matches the regular expression.
|
|
||||||
func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
|
|
||||||
pp := NewProperties()
|
|
||||||
for _, k := range p.k {
|
|
||||||
if re.MatchString(k) {
|
|
||||||
pp.Set(k, p.m[k])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pp
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterPrefix returns a new properties object which contains all properties
|
|
||||||
// for which the key starts with the prefix.
|
|
||||||
func (p *Properties) FilterPrefix(prefix string) *Properties {
|
|
||||||
pp := NewProperties()
|
|
||||||
for _, k := range p.k {
|
|
||||||
if strings.HasPrefix(k, prefix) {
|
|
||||||
pp.Set(k, p.m[k])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of keys.
|
|
||||||
func (p *Properties) Len() int {
|
|
||||||
return len(p.m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns all keys in the same order as in the input.
|
|
||||||
func (p *Properties) Keys() []string {
|
|
||||||
keys := make([]string, len(p.k))
|
|
||||||
for i, k := range p.k {
|
|
||||||
keys[i] = k
|
|
||||||
}
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the property key to the corresponding value.
|
|
||||||
// If a value for key existed before then ok is true and prev
|
|
||||||
// contains the previous value. If the value contains a
|
|
||||||
// circular reference or a malformed expression then
|
|
||||||
// an error is returned.
|
|
||||||
// An empty key is silently ignored.
|
|
||||||
func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
|
|
||||||
if key == "" {
|
|
||||||
return "", false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// to check for a circular reference we temporarily need
|
|
||||||
// to set the new value. If there is an error then revert
|
|
||||||
// to the previous state. Only if all tests are successful
|
|
||||||
// then we add the key to the p.k list.
|
|
||||||
prev, ok = p.Get(key)
|
|
||||||
p.m[key] = value
|
|
||||||
|
|
||||||
// now check for a circular reference
|
|
||||||
_, err = p.expand(value)
|
|
||||||
if err != nil {
|
|
||||||
|
|
||||||
// revert to the previous state
|
|
||||||
if ok {
|
|
||||||
p.m[key] = prev
|
|
||||||
} else {
|
|
||||||
delete(p.m, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
p.k = append(p.k, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
return prev, ok, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustSet sets the property key to the corresponding value.
|
|
||||||
// If a value for key existed before then ok is true and prev
|
|
||||||
// contains the previous value. An empty key is silently ignored.
|
|
||||||
func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
|
|
||||||
prev, ok, err := p.Set(key, value)
|
|
||||||
if err != nil {
|
|
||||||
ErrorHandler(err)
|
|
||||||
}
|
|
||||||
return prev, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string of all expanded 'key = value' pairs.
|
|
||||||
func (p *Properties) String() string {
|
|
||||||
var s string
|
|
||||||
for _, key := range p.k {
|
|
||||||
value, _ := p.Get(key)
|
|
||||||
s = fmt.Sprintf("%s%s = %s\n", s, key, value)
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes all unexpanded 'key = value' pairs to the given writer.
|
|
||||||
// Write returns the number of bytes written and any write error encountered.
|
|
||||||
func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
|
|
||||||
return p.WriteComment(w, "", enc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
|
|
||||||
// If prefix is not empty then comments are written with a blank line and the
|
|
||||||
// given prefix. The prefix should be either "# " or "! " to be compatible with
|
|
||||||
// the properties file format. Otherwise, the properties parser will not be
|
|
||||||
// able to read the file back in. It returns the number of bytes written and
|
|
||||||
// any write error encountered.
|
|
||||||
func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
|
|
||||||
var x int
|
|
||||||
|
|
||||||
for _, key := range p.k {
|
|
||||||
value := p.m[key]
|
|
||||||
|
|
||||||
if prefix != "" {
|
|
||||||
if comments, ok := p.c[key]; ok {
|
|
||||||
// don't print comments if they are all empty
|
|
||||||
allEmpty := true
|
|
||||||
for _, c := range comments {
|
|
||||||
if c != "" {
|
|
||||||
allEmpty = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !allEmpty {
|
|
||||||
// add a blank line between entries but not at the top
|
|
||||||
if len(comments) > 0 && n > 0 {
|
|
||||||
x, err = fmt.Fprintln(w)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
n += x
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, c := range comments {
|
|
||||||
x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
n += x
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
n += x
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}

// ----------------------------------------------------------------------------

// check expands all values and returns an error if a circular reference or
// a malformed expression was found.
func (p *Properties) check() error {
    for _, value := range p.m {
        if _, err := p.expand(value); err != nil {
            return err
        }
    }
    return nil
}

func (p *Properties) expand(input string) (string, error) {
    // no pre/postfix -> nothing to expand
    if p.Prefix == "" && p.Postfix == "" {
        return input, nil
    }

    return expand(input, make(map[string]bool), p.Prefix, p.Postfix, p.m)
}

// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values.
// The function keeps track of the keys that were already expanded and stops if it
// detects a circular reference or a malformed expression of the form '(prefix)key'.
func expand(s string, keys map[string]bool, prefix, postfix string, values map[string]string) (string, error) {
    start := strings.Index(s, prefix)
    if start == -1 {
        return s, nil
    }

    keyStart := start + len(prefix)
    keyLen := strings.Index(s[keyStart:], postfix)
    if keyLen == -1 {
        return "", fmt.Errorf("malformed expression")
    }

    end := keyStart + keyLen + len(postfix) - 1
    key := s[keyStart : keyStart+keyLen]

    // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key)

    if _, ok := keys[key]; ok {
        return "", fmt.Errorf("circular reference")
    }

    val, ok := values[key]
    if !ok {
        val = os.Getenv(key)
    }

    // remember that we've seen the key
    keys[key] = true

    return expand(s[:start]+val+s[end+1:], keys, prefix, postfix, values)
}
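
The expand helper above resolves '(prefix)key(postfix)' references recursively and falls back to environment variables for unknown keys. A hedged sketch of this behaviour through the public API (assuming LoadString and the package's default "${...}" prefix/postfix; both are assumptions here):

    package main

    import (
        "fmt"

        "github.com/magiconair/properties"
    )

    func main() {
        // "${base}" inside the value of "bin" is expanded when the key is read.
        p, err := properties.LoadString("base = /opt/app\nbin = ${base}/bin")
        if err != nil {
            panic(err)
        }
        v, ok := p.Get("bin")
        fmt.Println(v, ok) // expected: /opt/app/bin true
    }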

// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters.
func encode(s string, special string, enc Encoding) string {
    switch enc {
    case UTF8:
        return encodeUtf8(s, special)
    case ISO_8859_1:
        return encodeIso(s, special)
    default:
        panic(fmt.Sprintf("unsupported encoding %v", enc))
    }
}

func encodeUtf8(s string, special string) string {
    v := ""
    for pos := 0; pos < len(s); {
        r, w := utf8.DecodeRuneInString(s[pos:])
        pos += w
        v += escape(r, special)
    }
    return v
}

func encodeIso(s string, special string) string {
    var r rune
    var w int
    var v string
    for pos := 0; pos < len(s); {
        switch r, w = utf8.DecodeRuneInString(s[pos:]); {
        case r < 1<<8: // single byte rune -> escape special chars only
            v += escape(r, special)
        case r < 1<<16: // two byte rune -> unicode literal
            v += fmt.Sprintf("\\u%04x", r)
        default: // more than two bytes per rune -> can't encode
            v += "?"
        }
        pos += w
    }
    return v
}

func escape(r rune, special string) string {
    switch r {
    case '\f':
        return "\\f"
    case '\n':
        return "\\n"
    case '\r':
        return "\\r"
    case '\t':
        return "\\t"
    default:
        if strings.ContainsRune(special, r) {
            return "\\" + string(r)
        }
        return string(r)
    }
}

func invalidKeyError(key string) error {
    return fmt.Errorf("unknown property: %s", key)
}
@ -1,31 +0,0 @@
// Copyright 2013-2014 Frank Schroeder. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package properties

import (
    "fmt"
    "math"
)

// make this a var to overwrite it in a test
var is32Bit = ^uint(0) == math.MaxUint32

// intRangeCheck checks if the value fits into the int type and
// panics if it does not.
func intRangeCheck(key string, v int64) int {
    if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) {
        panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
    }
    return int(v)
}

// uintRangeCheck checks if the value fits into the uint type and
// panics if it does not.
func uintRangeCheck(key string, v uint64) uint {
    if is32Bit && v > math.MaxUint32 {
        panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
    }
    return uint(v)
}
@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2014 Steve Francia

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -1,72 +0,0 @@
cast
====

Easy and safe casting from one type to another in Go

Don’t Panic! ... Cast

## What is Cast?

Cast is a library to convert between different go types in a consistent and easy way.

Cast provides simple functions to easily convert a number to a string, an
interface into a bool, etc. Cast does this intelligently when an obvious
conversion is possible. It doesn’t make any attempts to guess what you meant,
for example you can only convert a string to an int when it is a string
representation of an int such as “8”. Cast was developed for use in
[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
for meta data.

## Why use Cast?

When working with dynamic data in Go you often need to cast or convert the data
from one type into another. Cast goes beyond just using type assertion (though
it uses that when possible) to provide a very straightforward and convenient
library.

If you are working with interfaces to handle things like dynamic content
you’ll need an easy way to convert an interface into a given type. This
is the library for you.

If you are taking in data from YAML, TOML or JSON or other formats which lack
full types, then Cast is the library for you.

## Usage

Cast provides a handful of To_____ methods. These methods will always return
the desired type. **If input is provided that will not convert to that type, the
0 or nil value for that type will be returned**.

Cast also provides identical methods To_____E. These return the same result as
the To_____ methods, plus an additional error which tells you if it successfully
converted. Using these methods you can tell the difference between when the
input matched the zero value or when the conversion failed and the zero value
was returned.

The following examples are merely a sample of what is available. Please review
the code for a complete set.

### Example ‘ToString’:

    cast.ToString("mayonegg")         // "mayonegg"
    cast.ToString(8)                  // "8"
    cast.ToString(8.31)               // "8.31"
    cast.ToString([]byte("one time")) // "one time"
    cast.ToString(nil)                // ""

    var foo interface{} = "one more time"
    cast.ToString(foo)                // "one more time"

### Example ‘ToInt’:

    cast.ToInt(8)                  // 8
    cast.ToInt(8.31)               // 8
    cast.ToInt("8")                // 8
    cast.ToInt(true)               // 1
    cast.ToInt(false)              // 0

    var eight interface{} = 8
    cast.ToInt(eight)              // 8
    cast.ToInt(nil)                // 0
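
As described under Usage, the To_____E variants also report whether the conversion succeeded. The lines below are an illustrative sketch (not taken from the upstream README); they use cast.ToIntE, which is defined in this package:

    cast.ToIntE("8")      // 8, nil
    cast.ToIntE("eight")  // 0, error ("eight" is not a string representation of an int)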
@ -1,68 +0,0 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package cast

import "time"

func ToBool(i interface{}) bool {
    v, _ := ToBoolE(i)
    return v
}

func ToTime(i interface{}) time.Time {
    v, _ := ToTimeE(i)
    return v
}

func ToDuration(i interface{}) time.Duration {
    v, _ := ToDurationE(i)
    return v
}

func ToFloat64(i interface{}) float64 {
    v, _ := ToFloat64E(i)
    return v
}

func ToInt(i interface{}) int {
    v, _ := ToIntE(i)
    return v
}

func ToString(i interface{}) string {
    v, _ := ToStringE(i)
    return v
}

func ToStringMapString(i interface{}) map[string]string {
    v, _ := ToStringMapStringE(i)
    return v
}

func ToStringMapBool(i interface{}) map[string]bool {
    v, _ := ToStringMapBoolE(i)
    return v
}

func ToStringMap(i interface{}) map[string]interface{} {
    v, _ := ToStringMapE(i)
    return v
}

func ToSlice(i interface{}) []interface{} {
    v, _ := ToSliceE(i)
    return v
}

func ToStringSlice(i interface{}) []string {
    v, _ := ToStringSliceE(i)
    return v
}

func ToIntSlice(i interface{}) []int {
    v, _ := ToIntSliceE(i)
    return v
}