mirror of https://github.com/docker/cli.git
cli: add otel sdk tracing and metric providers to the core cli
This adds the code used by buildx and compose into the default CLI program to help normalize the usage of these APIs and allow code reuse between projects. It also allows these projects to benefit from improvements or changes that may be made by another team. At the moment, these APIs are a pretty thin layer on the OTEL SDK. It configures an additional exporter to a docker endpoint that's used for usage collection and is only active if the option is configured in docker desktop. This also upgrades the OTEL version to v1.19 which is the one being used by buildkit, buildx, compose, etc. Signed-off-by: Jonathan A. Sternberg <jonathan.sternberg@docker.com>
This commit is contained in:
parent
b4d03289a7
commit
89db01ef97
|
@ -65,6 +65,7 @@ type Cli interface {
|
||||||
ContextStore() store.Store
|
ContextStore() store.Store
|
||||||
CurrentContext() string
|
CurrentContext() string
|
||||||
DockerEndpoint() docker.Endpoint
|
DockerEndpoint() docker.Endpoint
|
||||||
|
TelemetryClient
|
||||||
}
|
}
|
||||||
|
|
||||||
// DockerCli is an instance of the docker command line client.
|
// DockerCli is an instance of the docker command line client.
|
||||||
|
@ -85,6 +86,7 @@ type DockerCli struct {
|
||||||
dockerEndpoint docker.Endpoint
|
dockerEndpoint docker.Endpoint
|
||||||
contextStoreConfig store.Config
|
contextStoreConfig store.Config
|
||||||
initTimeout time.Duration
|
initTimeout time.Duration
|
||||||
|
res telemetryResource
|
||||||
|
|
||||||
// baseCtx is the base context used for internal operations. In the future
|
// baseCtx is the base context used for internal operations. In the future
|
||||||
// this may be replaced by explicitly passing a context to functions that
|
// this may be replaced by explicitly passing a context to functions that
|
||||||
|
|
|
@ -0,0 +1,202 @@
|
||||||
|
package command
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/distribution/uuid"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/metric"
|
||||||
|
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
|
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||||
|
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
|
||||||
|
"go.opentelemetry.io/otel/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
// exportTimeout limits how long a final telemetry export may take so that
// reporting can never noticeably delay CLI exit.
const exportTimeout = 50 * time.Millisecond
|
||||||
|
|
||||||
|
// TracerProvider is an extension of the trace.TracerProvider interface for CLI programs.
|
||||||
|
type TracerProvider interface {
|
||||||
|
trace.TracerProvider
|
||||||
|
ForceFlush(ctx context.Context) error
|
||||||
|
Shutdown(ctx context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// MeterProvider is an extension of the metric.MeterProvider interface for CLI programs.
|
||||||
|
type MeterProvider interface {
|
||||||
|
metric.MeterProvider
|
||||||
|
ForceFlush(ctx context.Context) error
|
||||||
|
Shutdown(ctx context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// TelemetryClient provides the methods for using OTEL tracing or metrics.
|
||||||
|
type TelemetryClient interface {
|
||||||
|
// Resource returns the OTEL Resource configured with this TelemetryClient.
|
||||||
|
// This resource may be created lazily, but the resource should be the same
|
||||||
|
// each time this function is invoked.
|
||||||
|
Resource() *resource.Resource
|
||||||
|
|
||||||
|
// TracerProvider returns a TracerProvider. This TracerProvider will be configured
|
||||||
|
// with the default tracing components for a CLI program along with any options given
|
||||||
|
// for the SDK.
|
||||||
|
TracerProvider(ctx context.Context, opts ...sdktrace.TracerProviderOption) TracerProvider
|
||||||
|
|
||||||
|
// MeterProvider returns a MeterProvider. This MeterProvider will be configured
|
||||||
|
// with the default metric components for a CLI program along with any options given
|
||||||
|
// for the SDK.
|
||||||
|
MeterProvider(ctx context.Context, opts ...sdkmetric.Option) MeterProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *DockerCli) Resource() *resource.Resource {
|
||||||
|
return cli.res.Get()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *DockerCli) TracerProvider(ctx context.Context, opts ...sdktrace.TracerProviderOption) TracerProvider {
|
||||||
|
allOpts := make([]sdktrace.TracerProviderOption, 0, len(opts)+2)
|
||||||
|
allOpts = append(allOpts, sdktrace.WithResource(cli.Resource()))
|
||||||
|
allOpts = append(allOpts, dockerSpanExporter(ctx, cli)...)
|
||||||
|
allOpts = append(allOpts, opts...)
|
||||||
|
return sdktrace.NewTracerProvider(allOpts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *DockerCli) MeterProvider(ctx context.Context, opts ...sdkmetric.Option) MeterProvider {
|
||||||
|
allOpts := make([]sdkmetric.Option, 0, len(opts)+2)
|
||||||
|
allOpts = append(allOpts, sdkmetric.WithResource(cli.Resource()))
|
||||||
|
allOpts = append(allOpts, dockerMetricExporter(ctx, cli)...)
|
||||||
|
allOpts = append(allOpts, opts...)
|
||||||
|
return sdkmetric.NewMeterProvider(allOpts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithResourceOptions configures additional options for the default resource. The default
|
||||||
|
// resource will continue to include its default options.
|
||||||
|
func WithResourceOptions(opts ...resource.Option) CLIOption {
|
||||||
|
return func(cli *DockerCli) error {
|
||||||
|
cli.res.AppendOptions(opts...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithResource overwrites the default resource and prevents its creation.
|
||||||
|
func WithResource(res *resource.Resource) CLIOption {
|
||||||
|
return func(cli *DockerCli) error {
|
||||||
|
cli.res.Set(res)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type telemetryResource struct {
|
||||||
|
res *resource.Resource
|
||||||
|
opts []resource.Option
|
||||||
|
once sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *telemetryResource) Set(res *resource.Resource) {
|
||||||
|
r.res = res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *telemetryResource) Get() *resource.Resource {
|
||||||
|
r.once.Do(r.init)
|
||||||
|
return r.res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *telemetryResource) init() {
|
||||||
|
if r.res != nil {
|
||||||
|
r.opts = nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := append(r.defaultOptions(), r.opts...)
|
||||||
|
res, err := resource.New(context.Background(), opts...)
|
||||||
|
if err != nil {
|
||||||
|
otel.Handle(err)
|
||||||
|
}
|
||||||
|
r.res = res
|
||||||
|
|
||||||
|
// Clear the resource options since they'll never be used again and to allow
|
||||||
|
// the garbage collector to retrieve that memory.
|
||||||
|
r.opts = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *telemetryResource) defaultOptions() []resource.Option {
|
||||||
|
return []resource.Option{
|
||||||
|
resource.WithDetectors(serviceNameDetector{}),
|
||||||
|
resource.WithAttributes(
|
||||||
|
// Use a unique instance id so OTEL knows that each invocation
|
||||||
|
// of the CLI is its own instance. Without this, downstream
|
||||||
|
// OTEL processors may think the same process is restarting
|
||||||
|
// continuously.
|
||||||
|
semconv.ServiceInstanceID(uuid.Generate().String()),
|
||||||
|
),
|
||||||
|
resource.WithFromEnv(),
|
||||||
|
resource.WithTelemetrySDK(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *telemetryResource) AppendOptions(opts ...resource.Option) {
|
||||||
|
if r.res != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.opts = append(r.opts, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
type serviceNameDetector struct{}
|
||||||
|
|
||||||
|
func (serviceNameDetector) Detect(ctx context.Context) (*resource.Resource, error) {
|
||||||
|
return resource.StringDetector(
|
||||||
|
semconv.SchemaURL,
|
||||||
|
semconv.ServiceNameKey,
|
||||||
|
func() (string, error) {
|
||||||
|
return filepath.Base(os.Args[0]), nil
|
||||||
|
},
|
||||||
|
).Detect(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// cliReader is an implementation of Reader that will automatically
|
||||||
|
// report to a designated Exporter when Shutdown is called.
|
||||||
|
type cliReader struct {
|
||||||
|
sdkmetric.Reader
|
||||||
|
exporter sdkmetric.Exporter
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCLIReader(exp sdkmetric.Exporter) sdkmetric.Reader {
|
||||||
|
reader := sdkmetric.NewManualReader(
|
||||||
|
sdkmetric.WithTemporalitySelector(deltaTemporality),
|
||||||
|
)
|
||||||
|
return &cliReader{
|
||||||
|
Reader: reader,
|
||||||
|
exporter: exp,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *cliReader) Shutdown(ctx context.Context) error {
|
||||||
|
var rm metricdata.ResourceMetrics
|
||||||
|
if err := r.Reader.Collect(ctx, &rm); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Place a pretty tight constraint on the actual reporting.
|
||||||
|
// We don't want CLI metrics to prevent the CLI from exiting
|
||||||
|
// so if there's some kind of issue we need to abort pretty
|
||||||
|
// quickly.
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, exportTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
return r.exporter.Export(ctx, &rm)
|
||||||
|
}
|
||||||
|
|
||||||
|
// deltaTemporality sets the Temporality of every instrument to delta.
|
||||||
|
//
|
||||||
|
// This isn't really needed since we create a unique resource on each invocation,
|
||||||
|
// but it can help with cardinality concerns for downstream processors since they can
|
||||||
|
// perform aggregation for a time interval and then discard the data once that time
|
||||||
|
// period has passed. Cumulative temporality would imply to the downstream processor
|
||||||
|
// that they might receive a successive point and they may unnecessarily keep state
|
||||||
|
// they really shouldn't.
|
||||||
|
func deltaTemporality(_ sdkmetric.InstrumentKind) metricdata.Temporality {
|
||||||
|
return metricdata.DeltaTemporality
|
||||||
|
}
|
|
@ -0,0 +1,127 @@
|
||||||
|
package command
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||||
|
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
// otelContextFieldName is the key in the docker context metadata's additional
// fields that holds the OTEL exporter configuration map.
const otelContextFieldName = "otel"
|
||||||
|
|
||||||
|
// dockerExporterOTLPEndpoint retrieves the OTLP endpoint used for the docker reporter
|
||||||
|
// from the current context.
|
||||||
|
func dockerExporterOTLPEndpoint(cli Cli) (endpoint string, secure bool) {
|
||||||
|
meta, err := cli.ContextStore().GetMetadata(cli.CurrentContext())
|
||||||
|
if err != nil {
|
||||||
|
otel.Handle(err)
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
var otelCfg any
|
||||||
|
switch m := meta.Metadata.(type) {
|
||||||
|
case DockerContext:
|
||||||
|
otelCfg = m.AdditionalFields[otelContextFieldName]
|
||||||
|
case map[string]any:
|
||||||
|
otelCfg = m[otelContextFieldName]
|
||||||
|
}
|
||||||
|
|
||||||
|
if otelCfg == nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
otelMap, ok := otelCfg.(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
otel.Handle(errors.Errorf(
|
||||||
|
"unexpected type for field %q: %T (expected: %T)",
|
||||||
|
otelContextFieldName,
|
||||||
|
otelCfg,
|
||||||
|
otelMap,
|
||||||
|
))
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// keys from https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
|
||||||
|
endpoint, ok = otelMap["OTEL_EXPORTER_OTLP_ENDPOINT"].(string)
|
||||||
|
if !ok {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse the endpoint. The docker config expects the endpoint to be
|
||||||
|
// in the form of a URL to match the environment variable, but this
|
||||||
|
// option doesn't correspond directly to WithEndpoint.
|
||||||
|
//
|
||||||
|
// We pretend we're the same as the environment reader.
|
||||||
|
u, err := url.Parse(endpoint)
|
||||||
|
if err != nil {
|
||||||
|
otel.Handle(errors.Errorf("docker otel endpoint is invalid: %s", err))
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
switch u.Scheme {
|
||||||
|
case "unix":
|
||||||
|
// Unix sockets are a bit weird. OTEL seems to imply they
|
||||||
|
// can be used as an environment variable and are handled properly,
|
||||||
|
// but they don't seem to be as the behavior of the environment variable
|
||||||
|
// is to strip the scheme from the endpoint, but the underlying implementation
|
||||||
|
// needs the scheme to use the correct resolver.
|
||||||
|
//
|
||||||
|
// We'll just handle this in a special way and add the unix:// back to the endpoint.
|
||||||
|
endpoint = fmt.Sprintf("unix://%s", path.Join(u.Host, u.Path))
|
||||||
|
case "https":
|
||||||
|
secure = true
|
||||||
|
fallthrough
|
||||||
|
case "http":
|
||||||
|
endpoint = path.Join(u.Host, u.Path)
|
||||||
|
}
|
||||||
|
return endpoint, secure
|
||||||
|
}
|
||||||
|
|
||||||
|
func dockerSpanExporter(ctx context.Context, cli Cli) []sdktrace.TracerProviderOption {
|
||||||
|
endpoint, secure := dockerExporterOTLPEndpoint(cli)
|
||||||
|
if endpoint == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := []otlptracegrpc.Option{
|
||||||
|
otlptracegrpc.WithEndpoint(endpoint),
|
||||||
|
}
|
||||||
|
if !secure {
|
||||||
|
opts = append(opts, otlptracegrpc.WithInsecure())
|
||||||
|
}
|
||||||
|
|
||||||
|
exp, err := otlptracegrpc.New(ctx, opts...)
|
||||||
|
if err != nil {
|
||||||
|
otel.Handle(err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return []sdktrace.TracerProviderOption{sdktrace.WithBatcher(exp, sdktrace.WithExportTimeout(exportTimeout))}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dockerMetricExporter(ctx context.Context, cli Cli) []sdkmetric.Option {
|
||||||
|
endpoint, secure := dockerExporterOTLPEndpoint(cli)
|
||||||
|
if endpoint == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := []otlpmetricgrpc.Option{
|
||||||
|
otlpmetricgrpc.WithEndpoint(endpoint),
|
||||||
|
}
|
||||||
|
if !secure {
|
||||||
|
opts = append(opts, otlpmetricgrpc.WithInsecure())
|
||||||
|
}
|
||||||
|
|
||||||
|
exp, err := otlpmetricgrpc.New(ctx, opts...)
|
||||||
|
if err != nil {
|
||||||
|
otel.Handle(err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return []sdkmetric.Option{sdkmetric.WithReader(newCLIReader(exp))}
|
||||||
|
}
|
17
vendor.mod
17
vendor.mod
|
@ -38,6 +38,12 @@ require (
|
||||||
github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d
|
github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d
|
||||||
github.com/xeipuuv/gojsonschema v1.2.0
|
github.com/xeipuuv/gojsonschema v1.2.0
|
||||||
go.opentelemetry.io/otel v1.21.0
|
go.opentelemetry.io/otel v1.21.0
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
|
||||||
|
go.opentelemetry.io/otel/metric v1.21.0
|
||||||
|
go.opentelemetry.io/otel/sdk v1.21.0
|
||||||
|
go.opentelemetry.io/otel/sdk/metric v1.21.0
|
||||||
|
go.opentelemetry.io/otel/trace v1.21.0
|
||||||
golang.org/x/sync v0.6.0
|
golang.org/x/sync v0.6.0
|
||||||
golang.org/x/sys v0.16.0
|
golang.org/x/sys v0.16.0
|
||||||
golang.org/x/term v0.15.0
|
golang.org/x/term v0.15.0
|
||||||
|
@ -52,16 +58,18 @@ require (
|
||||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||||
github.com/Microsoft/hcsshim v0.11.4 // indirect
|
github.com/Microsoft/hcsshim v0.11.4 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
|
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||||
github.com/containerd/log v0.1.0 // indirect
|
github.com/containerd/log v0.1.0 // indirect
|
||||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||||
github.com/docker/go-metrics v0.0.1 // indirect
|
github.com/docker/go-metrics v0.0.1 // indirect
|
||||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||||
github.com/go-logr/logr v1.3.0 // indirect
|
github.com/go-logr/logr v1.4.1 // indirect
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/golang/protobuf v1.5.4 // indirect
|
github.com/golang/protobuf v1.5.4 // indirect
|
||||||
github.com/gorilla/mux v1.8.1 // indirect
|
github.com/gorilla/mux v1.8.1 // indirect
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/klauspost/compress v1.17.4 // indirect
|
github.com/klauspost/compress v1.17.4 // indirect
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||||
|
@ -78,14 +86,15 @@ require (
|
||||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||||
go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
|
go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
|
||||||
go.opentelemetry.io/otel/metric v1.21.0 // indirect
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
|
||||||
go.opentelemetry.io/otel/trace v1.21.0 // indirect
|
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||||
golang.org/x/crypto v0.17.0 // indirect
|
golang.org/x/crypto v0.17.0 // indirect
|
||||||
golang.org/x/mod v0.14.0 // indirect
|
golang.org/x/mod v0.14.0 // indirect
|
||||||
golang.org/x/net v0.19.0 // indirect
|
golang.org/x/net v0.19.0 // indirect
|
||||||
golang.org/x/time v0.3.0 // indirect
|
golang.org/x/time v0.3.0 // indirect
|
||||||
golang.org/x/tools v0.16.0 // indirect
|
golang.org/x/tools v0.16.0 // indirect
|
||||||
|
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
|
||||||
google.golang.org/grpc v1.59.0 // indirect
|
google.golang.org/grpc v1.60.1 // indirect
|
||||||
google.golang.org/protobuf v1.33.0 // indirect
|
google.golang.org/protobuf v1.33.0 // indirect
|
||||||
)
|
)
|
||||||
|
|
35
vendor.sum
35
vendor.sum
|
@ -87,8 +87,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
||||||
|
@ -101,6 +101,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||||
|
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||||
|
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
@ -235,8 +237,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
|
||||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
@ -291,19 +293,27 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJ
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
|
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
|
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
|
||||||
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||||
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||||
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
|
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||||
go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
|
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||||
|
go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0=
|
||||||
|
go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q=
|
||||||
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||||
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||||
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
@ -372,13 +382,14 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8=
|
google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
|
google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
|
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
|
||||||
|
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
|
||||||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
|
||||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||||
|
|
|
@ -0,0 +1,25 @@
|
||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
|
||||||
|
# IDEs
|
||||||
|
.idea/
|
|
@ -0,0 +1,20 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Cenk Altı
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||||
|
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||||
|
subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||||
|
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||||
|
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||||
|
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,32 @@
|
||||||
|
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
|
||||||
|
|
||||||
|
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
|
||||||
|
|
||||||
|
[Exponential backoff][exponential backoff wiki]
|
||||||
|
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
|
||||||
|
in order to gradually find an acceptable rate.
|
||||||
|
The retries exponentially increase and stop increasing when a certain threshold is met.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
|
||||||
|
|
||||||
|
Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
* I would like to keep this library as small as possible.
|
||||||
|
* Please don't send a PR without opening an issue and discussing it first.
|
||||||
|
* If proposed change is not a common use case, I will probably not accept it.
|
||||||
|
|
||||||
|
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
|
||||||
|
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
|
||||||
|
[travis]: https://travis-ci.org/cenkalti/backoff
|
||||||
|
[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
|
||||||
|
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
|
||||||
|
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
|
||||||
|
|
||||||
|
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
|
||||||
|
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
|
||||||
|
|
||||||
|
[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
|
|
@ -0,0 +1,66 @@
|
||||||
|
// Package backoff implements backoff algorithms for retrying operations.
|
||||||
|
//
|
||||||
|
// Use Retry function for retrying operations that may fail.
|
||||||
|
// If Retry does not meet your needs,
|
||||||
|
// copy/paste the function into your project and modify as you wish.
|
||||||
|
//
|
||||||
|
// There is also Ticker type similar to time.Ticker.
|
||||||
|
// You can use it if you need to work with channels.
|
||||||
|
//
|
||||||
|
// See Examples section below for usage examples.
|
||||||
|
package backoff
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// BackOff is a backoff policy for retrying an operation.
|
||||||
|
type BackOff interface {
|
||||||
|
// NextBackOff returns the duration to wait before retrying the operation,
|
||||||
|
// or backoff. Stop to indicate that no more retries should be made.
|
||||||
|
//
|
||||||
|
// Example usage:
|
||||||
|
//
|
||||||
|
// duration := backoff.NextBackOff();
|
||||||
|
// if (duration == backoff.Stop) {
|
||||||
|
// // Do not retry operation.
|
||||||
|
// } else {
|
||||||
|
// // Sleep for duration and retry operation.
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
NextBackOff() time.Duration
|
||||||
|
|
||||||
|
// Reset to initial state.
|
||||||
|
Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop indicates that no more retries should be made for use in NextBackOff().
|
||||||
|
const Stop time.Duration = -1
|
||||||
|
|
||||||
|
// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
|
||||||
|
// meaning that the operation is retried immediately without waiting, indefinitely.
|
||||||
|
type ZeroBackOff struct{}
|
||||||
|
|
||||||
|
func (b *ZeroBackOff) Reset() {}
|
||||||
|
|
||||||
|
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
|
||||||
|
|
||||||
|
// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
|
||||||
|
// NextBackOff(), meaning that the operation should never be retried.
|
||||||
|
type StopBackOff struct{}
|
||||||
|
|
||||||
|
func (b *StopBackOff) Reset() {}
|
||||||
|
|
||||||
|
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
|
||||||
|
|
||||||
|
// ConstantBackOff is a backoff policy that always returns the same backoff delay.
|
||||||
|
// This is in contrast to an exponential backoff policy,
|
||||||
|
// which returns a delay that grows longer as you call NextBackOff() over and over again.
|
||||||
|
type ConstantBackOff struct {
|
||||||
|
Interval time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *ConstantBackOff) Reset() {}
|
||||||
|
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
|
||||||
|
|
||||||
|
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
|
||||||
|
return &ConstantBackOff{Interval: d}
|
||||||
|
}
|
|
@ -0,0 +1,62 @@
|
||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BackOffContext is a backoff policy that stops retrying after the context
|
||||||
|
// is canceled.
|
||||||
|
type BackOffContext interface { // nolint: golint
|
||||||
|
BackOff
|
||||||
|
Context() context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
type backOffContext struct {
|
||||||
|
BackOff
|
||||||
|
ctx context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContext returns a BackOffContext with context ctx
|
||||||
|
//
|
||||||
|
// ctx must not be nil
|
||||||
|
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
|
||||||
|
if ctx == nil {
|
||||||
|
panic("nil context")
|
||||||
|
}
|
||||||
|
|
||||||
|
if b, ok := b.(*backOffContext); ok {
|
||||||
|
return &backOffContext{
|
||||||
|
BackOff: b.BackOff,
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &backOffContext{
|
||||||
|
BackOff: b,
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getContext(b BackOff) context.Context {
|
||||||
|
if cb, ok := b.(BackOffContext); ok {
|
||||||
|
return cb.Context()
|
||||||
|
}
|
||||||
|
if tb, ok := b.(*backOffTries); ok {
|
||||||
|
return getContext(tb.delegate)
|
||||||
|
}
|
||||||
|
return context.Background()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *backOffContext) Context() context.Context {
|
||||||
|
return b.ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *backOffContext) NextBackOff() time.Duration {
|
||||||
|
select {
|
||||||
|
case <-b.ctx.Done():
|
||||||
|
return Stop
|
||||||
|
default:
|
||||||
|
return b.BackOff.NextBackOff()
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,161 @@
|
||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||||
|
period for each retry attempt using a randomization function that grows exponentially.
|
||||||
|
|
||||||
|
NextBackOff() is calculated using the following formula:
|
||||||
|
|
||||||
|
randomized interval =
|
||||||
|
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||||
|
|
||||||
|
In other words NextBackOff() will range between the randomization factor
|
||||||
|
percentage below and above the retry interval.
|
||||||
|
|
||||||
|
For example, given the following parameters:
|
||||||
|
|
||||||
|
RetryInterval = 2
|
||||||
|
RandomizationFactor = 0.5
|
||||||
|
Multiplier = 2
|
||||||
|
|
||||||
|
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||||
|
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||||
|
|
||||||
|
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||||
|
|
||||||
|
If the time elapsed since an ExponentialBackOff instance is created goes past the
|
||||||
|
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
|
||||||
|
|
||||||
|
The elapsed time can be reset by calling Reset().
|
||||||
|
|
||||||
|
Example: Given the following default arguments, for 10 tries the sequence will be,
|
||||||
|
and assuming we go over the MaxElapsedTime on the 10th try:
|
||||||
|
|
||||||
|
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||||
|
|
||||||
|
1 0.5 [0.25, 0.75]
|
||||||
|
2 0.75 [0.375, 1.125]
|
||||||
|
3 1.125 [0.562, 1.687]
|
||||||
|
4 1.687 [0.8435, 2.53]
|
||||||
|
5 2.53 [1.265, 3.795]
|
||||||
|
6 3.795 [1.897, 5.692]
|
||||||
|
7 5.692 [2.846, 8.538]
|
||||||
|
8 8.538 [4.269, 12.807]
|
||||||
|
9 12.807 [6.403, 19.210]
|
||||||
|
10 19.210 backoff.Stop
|
||||||
|
|
||||||
|
Note: Implementation is not thread-safe.
|
||||||
|
*/
|
||||||
|
type ExponentialBackOff struct {
|
||||||
|
InitialInterval time.Duration
|
||||||
|
RandomizationFactor float64
|
||||||
|
Multiplier float64
|
||||||
|
MaxInterval time.Duration
|
||||||
|
// After MaxElapsedTime the ExponentialBackOff returns Stop.
|
||||||
|
// It never stops if MaxElapsedTime == 0.
|
||||||
|
MaxElapsedTime time.Duration
|
||||||
|
Stop time.Duration
|
||||||
|
Clock Clock
|
||||||
|
|
||||||
|
currentInterval time.Duration
|
||||||
|
startTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clock is an interface that returns current time for BackOff.
|
||||||
|
type Clock interface {
|
||||||
|
Now() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default values for ExponentialBackOff.
|
||||||
|
const (
|
||||||
|
DefaultInitialInterval = 500 * time.Millisecond
|
||||||
|
DefaultRandomizationFactor = 0.5
|
||||||
|
DefaultMultiplier = 1.5
|
||||||
|
DefaultMaxInterval = 60 * time.Second
|
||||||
|
DefaultMaxElapsedTime = 15 * time.Minute
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||||
|
func NewExponentialBackOff() *ExponentialBackOff {
|
||||||
|
b := &ExponentialBackOff{
|
||||||
|
InitialInterval: DefaultInitialInterval,
|
||||||
|
RandomizationFactor: DefaultRandomizationFactor,
|
||||||
|
Multiplier: DefaultMultiplier,
|
||||||
|
MaxInterval: DefaultMaxInterval,
|
||||||
|
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||||
|
Stop: Stop,
|
||||||
|
Clock: SystemClock,
|
||||||
|
}
|
||||||
|
b.Reset()
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
type systemClock struct{}
|
||||||
|
|
||||||
|
func (t systemClock) Now() time.Time {
|
||||||
|
return time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SystemClock implements Clock interface that uses time.Now().
|
||||||
|
var SystemClock = systemClock{}
|
||||||
|
|
||||||
|
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||||
|
// Reset must be called before using b.
|
||||||
|
func (b *ExponentialBackOff) Reset() {
|
||||||
|
b.currentInterval = b.InitialInterval
|
||||||
|
b.startTime = b.Clock.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextBackOff calculates the next backoff interval using the formula:
|
||||||
|
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
|
||||||
|
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||||
|
// Make sure we have not gone over the maximum elapsed time.
|
||||||
|
elapsed := b.GetElapsedTime()
|
||||||
|
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||||
|
b.incrementCurrentInterval()
|
||||||
|
if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
|
||||||
|
return b.Stop
|
||||||
|
}
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
|
||||||
|
// is created and is reset when Reset() is called.
|
||||||
|
//
|
||||||
|
// The elapsed time is computed using time.Now().UnixNano(). It is
|
||||||
|
// safe to call even while the backoff policy is used by a running
|
||||||
|
// ticker.
|
||||||
|
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
|
||||||
|
return b.Clock.Now().Sub(b.startTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Increments the current interval by multiplying it with the multiplier.
|
||||||
|
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||||
|
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||||
|
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||||
|
b.currentInterval = b.MaxInterval
|
||||||
|
} else {
|
||||||
|
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns a random value from the following interval:
|
||||||
|
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
|
||||||
|
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||||
|
if randomizationFactor == 0 {
|
||||||
|
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
|
||||||
|
}
|
||||||
|
var delta = randomizationFactor * float64(currentInterval)
|
||||||
|
var minInterval = float64(currentInterval) - delta
|
||||||
|
var maxInterval = float64(currentInterval) + delta
|
||||||
|
|
||||||
|
// Get a random value from the range [minInterval, maxInterval].
|
||||||
|
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||||
|
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||||
|
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||||
|
}
|
|
@ -0,0 +1,146 @@
|
||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
|
||||||
|
// The operation will be retried using a backoff policy if it returns an error.
|
||||||
|
type OperationWithData[T any] func() (T, error)
|
||||||
|
|
||||||
|
// An Operation is executing by Retry() or RetryNotify().
|
||||||
|
// The operation will be retried using a backoff policy if it returns an error.
|
||||||
|
type Operation func() error
|
||||||
|
|
||||||
|
func (o Operation) withEmptyData() OperationWithData[struct{}] {
|
||||||
|
return func() (struct{}, error) {
|
||||||
|
return struct{}{}, o()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Notify is a notify-on-error function. It receives an operation error and
|
||||||
|
// backoff delay if the operation failed (with an error).
|
||||||
|
//
|
||||||
|
// NOTE that if the backoff policy stated to stop retrying,
|
||||||
|
// the notify function isn't called.
|
||||||
|
type Notify func(error, time.Duration)
|
||||||
|
|
||||||
|
// Retry the operation o until it does not return error or BackOff stops.
|
||||||
|
// o is guaranteed to be run at least once.
|
||||||
|
//
|
||||||
|
// If o returns a *PermanentError, the operation is not retried, and the
|
||||||
|
// wrapped error is returned.
|
||||||
|
//
|
||||||
|
// Retry sleeps the goroutine for the duration returned by BackOff after a
|
||||||
|
// failed operation returns.
|
||||||
|
func Retry(o Operation, b BackOff) error {
|
||||||
|
return RetryNotify(o, b, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryWithData is like Retry but returns data in the response too.
|
||||||
|
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
|
||||||
|
return RetryNotifyWithData(o, b, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryNotify calls notify function with the error and wait duration
|
||||||
|
// for each failed attempt before sleep.
|
||||||
|
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
|
||||||
|
return RetryNotifyWithTimer(operation, b, notify, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryNotifyWithData is like RetryNotify but returns data in the response too.
|
||||||
|
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
|
||||||
|
return doRetryNotify(operation, b, notify, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
|
||||||
|
// for each failed attempt before sleep.
|
||||||
|
// A default timer that uses system timer is used when nil is passed.
|
||||||
|
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
|
||||||
|
_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
|
||||||
|
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||||
|
return doRetryNotify(operation, b, notify, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||||
|
var (
|
||||||
|
err error
|
||||||
|
next time.Duration
|
||||||
|
res T
|
||||||
|
)
|
||||||
|
if t == nil {
|
||||||
|
t = &defaultTimer{}
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
t.Stop()
|
||||||
|
}()
|
||||||
|
|
||||||
|
ctx := getContext(b)
|
||||||
|
|
||||||
|
b.Reset()
|
||||||
|
for {
|
||||||
|
res, err = operation()
|
||||||
|
if err == nil {
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var permanent *PermanentError
|
||||||
|
if errors.As(err, &permanent) {
|
||||||
|
return res, permanent.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
if next = b.NextBackOff(); next == Stop {
|
||||||
|
if cerr := ctx.Err(); cerr != nil {
|
||||||
|
return res, cerr
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if notify != nil {
|
||||||
|
notify(err, next)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Start(next)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return res, ctx.Err()
|
||||||
|
case <-t.C():
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PermanentError signals that the operation should not be retried.
|
||||||
|
type PermanentError struct {
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *PermanentError) Error() string {
|
||||||
|
return e.Err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *PermanentError) Unwrap() error {
|
||||||
|
return e.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *PermanentError) Is(target error) bool {
|
||||||
|
_, ok := target.(*PermanentError)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Permanent wraps the given err in a *PermanentError.
|
||||||
|
func Permanent(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &PermanentError{
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,97 @@
|
||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
|
||||||
|
//
|
||||||
|
// Ticks will continue to arrive when the previous operation is still running,
|
||||||
|
// so operations that take a while to fail could run in quick succession.
|
||||||
|
type Ticker struct {
|
||||||
|
C <-chan time.Time
|
||||||
|
c chan time.Time
|
||||||
|
b BackOff
|
||||||
|
ctx context.Context
|
||||||
|
timer Timer
|
||||||
|
stop chan struct{}
|
||||||
|
stopOnce sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTicker returns a new Ticker containing a channel that will send
|
||||||
|
// the time at times specified by the BackOff argument. Ticker is
|
||||||
|
// guaranteed to tick at least once. The channel is closed when Stop
|
||||||
|
// method is called or BackOff stops. It is not safe to manipulate the
|
||||||
|
// provided backoff policy (notably calling NextBackOff or Reset)
|
||||||
|
// while the ticker is running.
|
||||||
|
func NewTicker(b BackOff) *Ticker {
|
||||||
|
return NewTickerWithTimer(b, &defaultTimer{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTickerWithTimer returns a new Ticker with a custom timer.
|
||||||
|
// A default timer that uses system timer is used when nil is passed.
|
||||||
|
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
|
||||||
|
if timer == nil {
|
||||||
|
timer = &defaultTimer{}
|
||||||
|
}
|
||||||
|
c := make(chan time.Time)
|
||||||
|
t := &Ticker{
|
||||||
|
C: c,
|
||||||
|
c: c,
|
||||||
|
b: b,
|
||||||
|
ctx: getContext(b),
|
||||||
|
timer: timer,
|
||||||
|
stop: make(chan struct{}),
|
||||||
|
}
|
||||||
|
t.b.Reset()
|
||||||
|
go t.run()
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop turns off a ticker. After Stop, no more ticks will be sent.
|
||||||
|
func (t *Ticker) Stop() {
|
||||||
|
t.stopOnce.Do(func() { close(t.stop) })
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Ticker) run() {
|
||||||
|
c := t.c
|
||||||
|
defer close(c)
|
||||||
|
|
||||||
|
// Ticker is guaranteed to tick at least once.
|
||||||
|
afterC := t.send(time.Now())
|
||||||
|
|
||||||
|
for {
|
||||||
|
if afterC == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case tick := <-afterC:
|
||||||
|
afterC = t.send(tick)
|
||||||
|
case <-t.stop:
|
||||||
|
t.c = nil // Prevent future ticks from being sent to the channel.
|
||||||
|
return
|
||||||
|
case <-t.ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Ticker) send(tick time.Time) <-chan time.Time {
|
||||||
|
select {
|
||||||
|
case t.c <- tick:
|
||||||
|
case <-t.stop:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
next := t.b.NextBackOff()
|
||||||
|
if next == Stop {
|
||||||
|
t.Stop()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
t.timer.Start(next)
|
||||||
|
return t.timer.C()
|
||||||
|
}
|
|
@ -0,0 +1,35 @@
|
||||||
|
package backoff
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
type Timer interface {
|
||||||
|
Start(duration time.Duration)
|
||||||
|
Stop()
|
||||||
|
C() <-chan time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultTimer implements Timer interface using time.Timer
|
||||||
|
type defaultTimer struct {
|
||||||
|
timer *time.Timer
|
||||||
|
}
|
||||||
|
|
||||||
|
// C returns the timers channel which receives the current time when the timer fires.
|
||||||
|
func (t *defaultTimer) C() <-chan time.Time {
|
||||||
|
return t.timer.C
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start starts the timer to fire after the given duration
|
||||||
|
func (t *defaultTimer) Start(duration time.Duration) {
|
||||||
|
if t.timer == nil {
|
||||||
|
t.timer = time.NewTimer(duration)
|
||||||
|
} else {
|
||||||
|
t.timer.Reset(duration)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop is called when the timer is not used anymore and resources may be freed.
|
||||||
|
func (t *defaultTimer) Stop() {
|
||||||
|
if t.timer != nil {
|
||||||
|
t.timer.Stop()
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,38 @@
|
||||||
|
package backoff
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
/*
|
||||||
|
WithMaxRetries creates a wrapper around another BackOff, which will
|
||||||
|
return Stop if NextBackOff() has been called too many times since
|
||||||
|
the last time Reset() was called
|
||||||
|
|
||||||
|
Note: Implementation is not thread-safe.
|
||||||
|
*/
|
||||||
|
func WithMaxRetries(b BackOff, max uint64) BackOff {
|
||||||
|
return &backOffTries{delegate: b, maxTries: max}
|
||||||
|
}
|
||||||
|
|
||||||
|
type backOffTries struct {
|
||||||
|
delegate BackOff
|
||||||
|
maxTries uint64
|
||||||
|
numTries uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *backOffTries) NextBackOff() time.Duration {
|
||||||
|
if b.maxTries == 0 {
|
||||||
|
return Stop
|
||||||
|
}
|
||||||
|
if b.maxTries > 0 {
|
||||||
|
if b.maxTries <= b.numTries {
|
||||||
|
return Stop
|
||||||
|
}
|
||||||
|
b.numTries++
|
||||||
|
}
|
||||||
|
return b.delegate.NextBackOff()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *backOffTries) Reset() {
|
||||||
|
b.numTries = 0
|
||||||
|
b.delegate.Reset()
|
||||||
|
}
|
|
@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
|
||||||
| Adding a name to a logger | `WithName` | no API |
|
| Adding a name to a logger | `WithName` | no API |
|
||||||
| Modify verbosity of log entries in a call chain | `V` | no API |
|
| Modify verbosity of log entries in a call chain | `V` | no API |
|
||||||
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
|
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
|
||||||
|
| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
|
||||||
|
|
||||||
The high-level slog API is explicitly meant to be one of many different APIs
|
The high-level slog API is explicitly meant to be one of many different APIs
|
||||||
that can be layered on top of a shared `slog.Handler`. logr is one such
|
that can be layered on top of a shared `slog.Handler`. logr is one such
|
||||||
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
|
alternative API, with [interoperability](#slog-interoperability) provided by
|
||||||
package.
|
some conversion functions.
|
||||||
|
|
||||||
### Inspiration
|
### Inspiration
|
||||||
|
|
||||||
|
@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
|
||||||
## slog interoperability
|
## slog interoperability
|
||||||
|
|
||||||
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
|
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
|
||||||
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
|
and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
|
||||||
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
|
`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
|
||||||
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
|
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
|
||||||
slog API. `slogr` itself leaves that to the caller.
|
slog API.
|
||||||
|
|
||||||
## Using a `logr.Sink` as backend for slog
|
### Using a `logr.LogSink` as backend for slog
|
||||||
|
|
||||||
Ideally, a logr sink implementation should support both logr and slog by
|
Ideally, a logr sink implementation should support both logr and slog by
|
||||||
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
|
implementing both the normal logr interface(s) and `SlogSink`. Because
|
||||||
of a conflict in the parameters of the common `Enabled` method, it is [not
|
of a conflict in the parameters of the common `Enabled` method, it is [not
|
||||||
possible to implement both slog.Handler and logr.Sink in the same
|
possible to implement both slog.Handler and logr.Sink in the same
|
||||||
type](https://github.com/golang/go/issues/59110).
|
type](https://github.com/golang/go/issues/59110).
|
||||||
|
|
||||||
If both are supported, log calls can go from the high-level APIs to the backend
|
If both are supported, log calls can go from the high-level APIs to the backend
|
||||||
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
|
without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
|
||||||
convert back and forth without adding additional wrappers, with one exception:
|
convert back and forth without adding additional wrappers, with one exception:
|
||||||
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
|
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
|
||||||
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
|
`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
|
||||||
log calls.
|
log calls.
|
||||||
|
|
||||||
Such an implementation should also support values that implement specific
|
Such an implementation should also support values that implement specific
|
||||||
|
@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
|
||||||
These drawbacks are severe enough that applications using a mixture of slog and
|
These drawbacks are severe enough that applications using a mixture of slog and
|
||||||
logr should switch to a different backend.
|
logr should switch to a different backend.
|
||||||
|
|
||||||
## Using a `slog.Handler` as backend for logr
|
### Using a `slog.Handler` as backend for logr
|
||||||
|
|
||||||
Using a plain `slog.Handler` without support for logr works better than the
|
Using a plain `slog.Handler` without support for logr works better than the
|
||||||
other direction:
|
other direction:
|
||||||
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
|
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
|
||||||
by negating them.
|
by negating them.
|
||||||
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
|
- Stack unwinding is done by the `SlogSink` and the resulting program
|
||||||
counter is passed to the `slog.Handler`.
|
counter is passed to the `slog.Handler`.
|
||||||
- Names added via `Logger.WithName` are gathered and recorded in an additional
|
- Names added via `Logger.WithName` are gathered and recorded in an additional
|
||||||
attribute with `logger` as key and the names separated by slash as value.
|
attribute with `logger` as key and the names separated by slash as value.
|
||||||
|
@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
|
||||||
with logr implementations without slog support is not important, then
|
with logr implementations without slog support is not important, then
|
||||||
`slog.Valuer` is sufficient.
|
`slog.Valuer` is sufficient.
|
||||||
|
|
||||||
## Context support for slog
|
### Context support for slog
|
||||||
|
|
||||||
Storing a logger in a `context.Context` is not supported by
|
Storing a logger in a `context.Context` is not supported by
|
||||||
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
|
slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
|
||||||
to fill this gap:
|
used to fill this gap. They store and retrieve a `slog.Logger` pointer
|
||||||
|
under the same context key that is also used by `NewContext` and
|
||||||
|
`FromContext` for `logr.Logger` value.
|
||||||
|
|
||||||
func HandlerFromContext(ctx context.Context) slog.Handler {
|
When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
|
||||||
logger, err := logr.FromContext(ctx)
|
automatically convert the `slog.Logger` to a
|
||||||
if err == nil {
|
`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
|
||||||
return slogr.NewSlogHandler(logger)
|
|
||||||
}
|
|
||||||
return slog.Default().Handler()
|
|
||||||
}
|
|
||||||
|
|
||||||
func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
|
With this approach, binaries which use either slog or logr are as efficient as
|
||||||
return logr.NewContext(ctx, slogr.NewLogr(handler))
|
possible with no unnecessary allocations. This is also why the API stores a
|
||||||
}
|
`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
|
||||||
|
on retrieval would need to allocate one.
|
||||||
|
|
||||||
The downside is that storing and retrieving a `slog.Handler` needs more
|
The downside is that switching back and forth needs more allocations. Because
|
||||||
allocations compared to using a `logr.Logger`. Therefore the recommendation is
|
logr is the API that is already in use by different packages, in particular
|
||||||
to use the `logr.Logger` API in code which uses contextual logging.
|
Kubernetes, the recommendation is to use the `logr.Logger` API in code which
|
||||||
|
uses contextual logging.
|
||||||
|
|
||||||
|
An alternative to adding values to a logger and storing that logger in the
|
||||||
|
context is to store the values in the context and to configure a logging
|
||||||
|
backend to extract those values when emitting log entries. This only works when
|
||||||
|
log calls are passed the context, which is not supported by the logr API.
|
||||||
|
|
||||||
|
With the slog API, it is possible, but not
|
||||||
|
required. https://github.com/veqryn/slog-context is a package for slog which
|
||||||
|
provides additional support code for this approach. It also contains wrappers
|
||||||
|
for the context functions in logr, so developers who prefer to not use the logr
|
||||||
|
APIs directly can use those instead and the resulting code will still be
|
||||||
|
interoperable with logr.
|
||||||
|
|
||||||
## FAQ
|
## FAQ
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,33 @@
|
||||||
|
/*
|
||||||
|
Copyright 2023 The logr Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package logr
|
||||||
|
|
||||||
|
// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
|
||||||
|
// the value is always a Logger value. With Go >= 1.21, the value can be a
|
||||||
|
// Logger value or a slog.Logger pointer.
|
||||||
|
type contextKey struct{}
|
||||||
|
|
||||||
|
// notFoundError exists to carry an IsNotFound method.
|
||||||
|
type notFoundError struct{}
|
||||||
|
|
||||||
|
func (notFoundError) Error() string {
|
||||||
|
return "no logr.Logger was present"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (notFoundError) IsNotFound() bool {
|
||||||
|
return true
|
||||||
|
}
|
|
@ -0,0 +1,49 @@
|
||||||
|
//go:build !go1.21
|
||||||
|
// +build !go1.21
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2019 The logr Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package logr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||||
|
func FromContext(ctx context.Context) (Logger, error) {
|
||||||
|
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return Logger{}, notFoundError{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||||
|
// returns a Logger that discards all log messages.
|
||||||
|
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||||
|
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
return Discard()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewContext returns a new Context, derived from ctx, which carries the
|
||||||
|
// provided Logger.
|
||||||
|
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||||
|
return context.WithValue(ctx, contextKey{}, logger)
|
||||||
|
}
|
|
@ -0,0 +1,83 @@
|
||||||
|
//go:build go1.21
|
||||||
|
// +build go1.21
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2019 The logr Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package logr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||||
|
func FromContext(ctx context.Context) (Logger, error) {
|
||||||
|
v := ctx.Value(contextKey{})
|
||||||
|
if v == nil {
|
||||||
|
return Logger{}, notFoundError{}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v := v.(type) {
|
||||||
|
case Logger:
|
||||||
|
return v, nil
|
||||||
|
case *slog.Logger:
|
||||||
|
return FromSlogHandler(v.Handler()), nil
|
||||||
|
default:
|
||||||
|
// Not reached.
|
||||||
|
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
|
||||||
|
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
|
||||||
|
v := ctx.Value(contextKey{})
|
||||||
|
if v == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v := v.(type) {
|
||||||
|
case Logger:
|
||||||
|
return slog.New(ToSlogHandler(v))
|
||||||
|
case *slog.Logger:
|
||||||
|
return v
|
||||||
|
default:
|
||||||
|
// Not reached.
|
||||||
|
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||||
|
// returns a Logger that discards all log messages.
|
||||||
|
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||||
|
if logger, err := FromContext(ctx); err == nil {
|
||||||
|
return logger
|
||||||
|
}
|
||||||
|
return Discard()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewContext returns a new Context, derived from ctx, which carries the
|
||||||
|
// provided Logger.
|
||||||
|
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||||
|
return context.WithValue(ctx, contextKey{}, logger)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
|
||||||
|
// provided slog.Logger.
|
||||||
|
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
|
||||||
|
return context.WithValue(ctx, contextKey{}, logger)
|
||||||
|
}
|
|
@ -100,6 +100,11 @@ type Options struct {
|
||||||
// details, see docs for Go's time.Layout.
|
// details, see docs for Go's time.Layout.
|
||||||
TimestampFormat string
|
TimestampFormat string
|
||||||
|
|
||||||
|
// LogInfoLevel tells funcr what key to use to log the info level.
|
||||||
|
// If not specified, the info level will be logged as "level".
|
||||||
|
// If this is set to "", the info level will not be logged at all.
|
||||||
|
LogInfoLevel *string
|
||||||
|
|
||||||
// Verbosity tells funcr which V logs to produce. Higher values enable
|
// Verbosity tells funcr which V logs to produce. Higher values enable
|
||||||
// more logs. Info logs at or below this level will be written, while logs
|
// more logs. Info logs at or below this level will be written, while logs
|
||||||
// above this level will be discarded.
|
// above this level will be discarded.
|
||||||
|
@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
||||||
if opts.MaxLogDepth == 0 {
|
if opts.MaxLogDepth == 0 {
|
||||||
opts.MaxLogDepth = defaultMaxLogDepth
|
opts.MaxLogDepth = defaultMaxLogDepth
|
||||||
}
|
}
|
||||||
|
if opts.LogInfoLevel == nil {
|
||||||
|
opts.LogInfoLevel = new(string)
|
||||||
|
*opts.LogInfoLevel = "level"
|
||||||
|
}
|
||||||
f := Formatter{
|
f := Formatter{
|
||||||
outputFormat: outfmt,
|
outputFormat: outfmt,
|
||||||
prefix: "",
|
prefix: "",
|
||||||
|
@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
||||||
// implementation. It should be constructed with NewFormatter. Some of
|
// implementation. It should be constructed with NewFormatter. Some of
|
||||||
// its methods directly implement logr.LogSink.
|
// its methods directly implement logr.LogSink.
|
||||||
type Formatter struct {
|
type Formatter struct {
|
||||||
outputFormat outputFormat
|
outputFormat outputFormat
|
||||||
prefix string
|
prefix string
|
||||||
values []any
|
values []any
|
||||||
valuesStr string
|
valuesStr string
|
||||||
depth int
|
parentValuesStr string
|
||||||
opts *Options
|
depth int
|
||||||
|
opts *Options
|
||||||
|
group string // for slog groups
|
||||||
|
groupDepth int
|
||||||
}
|
}
|
||||||
|
|
||||||
// outputFormat indicates which outputFormat to use.
|
// outputFormat indicates which outputFormat to use.
|
||||||
|
@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string {
|
||||||
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
||||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
if f.outputFormat == outputJSON {
|
if f.outputFormat == outputJSON {
|
||||||
buf.WriteByte('{')
|
buf.WriteByte('{') // for the whole line
|
||||||
}
|
}
|
||||||
|
|
||||||
vals := builtins
|
vals := builtins
|
||||||
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
||||||
vals = hook(f.sanitize(vals))
|
vals = hook(f.sanitize(vals))
|
||||||
}
|
}
|
||||||
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
|
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
|
||||||
continuing := len(builtins) > 0
|
continuing := len(builtins) > 0
|
||||||
if len(f.valuesStr) > 0 {
|
|
||||||
|
if f.parentValuesStr != "" {
|
||||||
if continuing {
|
if continuing {
|
||||||
if f.outputFormat == outputJSON {
|
buf.WriteByte(f.comma())
|
||||||
buf.WriteByte(',')
|
|
||||||
} else {
|
|
||||||
buf.WriteByte(' ')
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
buf.WriteString(f.parentValuesStr)
|
||||||
continuing = true
|
continuing = true
|
||||||
buf.WriteString(f.valuesStr)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
groupDepth := f.groupDepth
|
||||||
|
if f.group != "" {
|
||||||
|
if f.valuesStr != "" || len(args) != 0 {
|
||||||
|
if continuing {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
}
|
||||||
|
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||||
|
buf.WriteByte(f.colon())
|
||||||
|
buf.WriteByte('{') // for the group
|
||||||
|
continuing = false
|
||||||
|
} else {
|
||||||
|
// The group was empty
|
||||||
|
groupDepth--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.valuesStr != "" {
|
||||||
|
if continuing {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
}
|
||||||
|
buf.WriteString(f.valuesStr)
|
||||||
|
continuing = true
|
||||||
|
}
|
||||||
|
|
||||||
vals = args
|
vals = args
|
||||||
if hook := f.opts.RenderArgsHook; hook != nil {
|
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||||
vals = hook(f.sanitize(vals))
|
vals = hook(f.sanitize(vals))
|
||||||
}
|
}
|
||||||
f.flatten(buf, vals, continuing, true) // escape user-provided keys
|
f.flatten(buf, vals, continuing, true) // escape user-provided keys
|
||||||
if f.outputFormat == outputJSON {
|
|
||||||
buf.WriteByte('}')
|
for i := 0; i < groupDepth; i++ {
|
||||||
|
buf.WriteByte('}') // for the groups
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
buf.WriteByte('}') // for the whole line
|
||||||
|
}
|
||||||
|
|
||||||
return buf.String()
|
return buf.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
||||||
if len(kvList)%2 != 0 {
|
if len(kvList)%2 != 0 {
|
||||||
kvList = append(kvList, noValue)
|
kvList = append(kvList, noValue)
|
||||||
}
|
}
|
||||||
|
copied := false
|
||||||
for i := 0; i < len(kvList); i += 2 {
|
for i := 0; i < len(kvList); i += 2 {
|
||||||
k, ok := kvList[i].(string)
|
k, ok := kvList[i].(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
if !copied {
|
||||||
|
newList := make([]any, len(kvList))
|
||||||
|
copy(newList, kvList)
|
||||||
|
kvList = newList
|
||||||
|
copied = true
|
||||||
|
}
|
||||||
k = f.nonStringKey(kvList[i])
|
k = f.nonStringKey(kvList[i])
|
||||||
kvList[i] = k
|
kvList[i] = k
|
||||||
}
|
}
|
||||||
|
@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
||||||
|
|
||||||
if i > 0 || continuing {
|
if i > 0 || continuing {
|
||||||
if f.outputFormat == outputJSON {
|
if f.outputFormat == outputJSON {
|
||||||
buf.WriteByte(',')
|
buf.WriteByte(f.comma())
|
||||||
} else {
|
} else {
|
||||||
// In theory the format could be something we don't understand. In
|
// In theory the format could be something we don't understand. In
|
||||||
// practice, we control it, so it won't be.
|
// practice, we control it, so it won't be.
|
||||||
|
@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if escapeKeys {
|
buf.WriteString(f.quoted(k, escapeKeys))
|
||||||
buf.WriteString(prettyString(k))
|
buf.WriteByte(f.colon())
|
||||||
} else {
|
|
||||||
// this is faster
|
|
||||||
buf.WriteByte('"')
|
|
||||||
buf.WriteString(k)
|
|
||||||
buf.WriteByte('"')
|
|
||||||
}
|
|
||||||
if f.outputFormat == outputJSON {
|
|
||||||
buf.WriteByte(':')
|
|
||||||
} else {
|
|
||||||
buf.WriteByte('=')
|
|
||||||
}
|
|
||||||
buf.WriteString(f.pretty(v))
|
buf.WriteString(f.pretty(v))
|
||||||
}
|
}
|
||||||
return kvList
|
return kvList
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f Formatter) quoted(str string, escape bool) string {
|
||||||
|
if escape {
|
||||||
|
return prettyString(str)
|
||||||
|
}
|
||||||
|
// this is faster
|
||||||
|
return `"` + str + `"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f Formatter) comma() byte {
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
return ','
|
||||||
|
}
|
||||||
|
return ' '
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f Formatter) colon() byte {
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
return ':'
|
||||||
|
}
|
||||||
|
return '='
|
||||||
|
}
|
||||||
|
|
||||||
func (f Formatter) pretty(value any) string {
|
func (f Formatter) pretty(value any) string {
|
||||||
return f.prettyWithFlags(value, 0, 0)
|
return f.prettyWithFlags(value, 0, 0)
|
||||||
}
|
}
|
||||||
|
@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||||
}
|
}
|
||||||
for i := 0; i < len(v); i += 2 {
|
for i := 0; i < len(v); i += 2 {
|
||||||
if i > 0 {
|
if i > 0 {
|
||||||
buf.WriteByte(',')
|
buf.WriteByte(f.comma())
|
||||||
}
|
}
|
||||||
k, _ := v[i].(string) // sanitize() above means no need to check success
|
k, _ := v[i].(string) // sanitize() above means no need to check success
|
||||||
// arbitrary keys might need escaping
|
// arbitrary keys might need escaping
|
||||||
buf.WriteString(prettyString(k))
|
buf.WriteString(prettyString(k))
|
||||||
buf.WriteByte(':')
|
buf.WriteByte(f.colon())
|
||||||
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
|
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
|
||||||
}
|
}
|
||||||
if flags&flagRawStruct == 0 {
|
if flags&flagRawStruct == 0 {
|
||||||
|
@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if printComma {
|
if printComma {
|
||||||
buf.WriteByte(',')
|
buf.WriteByte(f.comma())
|
||||||
}
|
}
|
||||||
printComma = true // if we got here, we are rendering a field
|
printComma = true // if we got here, we are rendering a field
|
||||||
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
||||||
|
@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||||
name = fld.Name
|
name = fld.Name
|
||||||
}
|
}
|
||||||
// field names can't contain characters which need escaping
|
// field names can't contain characters which need escaping
|
||||||
buf.WriteByte('"')
|
buf.WriteString(f.quoted(name, false))
|
||||||
buf.WriteString(name)
|
buf.WriteByte(f.colon())
|
||||||
buf.WriteByte('"')
|
|
||||||
buf.WriteByte(':')
|
|
||||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
|
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
|
||||||
}
|
}
|
||||||
if flags&flagRawStruct == 0 {
|
if flags&flagRawStruct == 0 {
|
||||||
|
@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||||
buf.WriteByte('[')
|
buf.WriteByte('[')
|
||||||
for i := 0; i < v.Len(); i++ {
|
for i := 0; i < v.Len(); i++ {
|
||||||
if i > 0 {
|
if i > 0 {
|
||||||
buf.WriteByte(',')
|
buf.WriteByte(f.comma())
|
||||||
}
|
}
|
||||||
e := v.Index(i)
|
e := v.Index(i)
|
||||||
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
|
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
|
||||||
|
@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||||
i := 0
|
i := 0
|
||||||
for it.Next() {
|
for it.Next() {
|
||||||
if i > 0 {
|
if i > 0 {
|
||||||
buf.WriteByte(',')
|
buf.WriteByte(f.comma())
|
||||||
}
|
}
|
||||||
// If a map key supports TextMarshaler, use it.
|
// If a map key supports TextMarshaler, use it.
|
||||||
keystr := ""
|
keystr := ""
|
||||||
|
@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
buf.WriteString(keystr)
|
buf.WriteString(keystr)
|
||||||
buf.WriteByte(':')
|
buf.WriteByte(f.colon())
|
||||||
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
|
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
|
@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any {
|
||||||
return kvList
|
return kvList
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// startGroup opens a new group scope (basically a sub-struct), which locks all
|
||||||
|
// the current saved values and starts them anew. This is needed to satisfy
|
||||||
|
// slog.
|
||||||
|
func (f *Formatter) startGroup(group string) {
|
||||||
|
// Unnamed groups are just inlined.
|
||||||
|
if group == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Any saved values can no longer be changed.
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
continuing := false
|
||||||
|
|
||||||
|
if f.parentValuesStr != "" {
|
||||||
|
buf.WriteString(f.parentValuesStr)
|
||||||
|
continuing = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.group != "" && f.valuesStr != "" {
|
||||||
|
if continuing {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
}
|
||||||
|
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||||
|
buf.WriteByte(f.colon())
|
||||||
|
buf.WriteByte('{') // for the group
|
||||||
|
continuing = false
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.valuesStr != "" {
|
||||||
|
if continuing {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
}
|
||||||
|
buf.WriteString(f.valuesStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: We don't close the scope here - that's done later, when a log line
|
||||||
|
// is actually rendered (because we have N scopes to close).
|
||||||
|
|
||||||
|
f.parentValuesStr = buf.String()
|
||||||
|
|
||||||
|
// Start collecting new values.
|
||||||
|
f.group = group
|
||||||
|
f.groupDepth++
|
||||||
|
f.valuesStr = ""
|
||||||
|
f.values = nil
|
||||||
|
}
|
||||||
|
|
||||||
// Init configures this Formatter from runtime info, such as the call depth
|
// Init configures this Formatter from runtime info, such as the call depth
|
||||||
// imposed by logr itself.
|
// imposed by logr itself.
|
||||||
// Note that this receiver is a pointer, so depth can be saved.
|
// Note that this receiver is a pointer, so depth can be saved.
|
||||||
|
@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
|
||||||
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
||||||
args = append(args, "caller", f.caller())
|
args = append(args, "caller", f.caller())
|
||||||
}
|
}
|
||||||
args = append(args, "level", level, "msg", msg)
|
if key := *f.opts.LogInfoLevel; key != "" {
|
||||||
|
args = append(args, key, level)
|
||||||
|
}
|
||||||
|
args = append(args, "msg", msg)
|
||||||
return prefix, f.render(args, kvList)
|
return prefix, f.render(args, kvList)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,105 @@
|
||||||
|
//go:build go1.21
|
||||||
|
// +build go1.21
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The logr Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package funcr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log/slog"
|
||||||
|
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ logr.SlogSink = &fnlogger{}
|
||||||
|
|
||||||
|
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
|
||||||
|
|
||||||
|
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
|
||||||
|
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||||
|
record.Attrs(func(attr slog.Attr) bool {
|
||||||
|
kvList = attrToKVs(attr, kvList)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
if record.Level >= slog.LevelError {
|
||||||
|
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
|
||||||
|
} else {
|
||||||
|
level := l.levelFromSlog(record.Level)
|
||||||
|
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
|
||||||
|
kvList := make([]any, 0, 2*len(attrs))
|
||||||
|
for _, attr := range attrs {
|
||||||
|
kvList = attrToKVs(attr, kvList)
|
||||||
|
}
|
||||||
|
l.AddValues(kvList)
|
||||||
|
return &l
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l fnlogger) WithGroup(name string) logr.SlogSink {
|
||||||
|
l.startGroup(name)
|
||||||
|
return &l
|
||||||
|
}
|
||||||
|
|
||||||
|
// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
|
||||||
|
// and other details of slog.
|
||||||
|
func attrToKVs(attr slog.Attr, kvList []any) []any {
|
||||||
|
attrVal := attr.Value.Resolve()
|
||||||
|
if attrVal.Kind() == slog.KindGroup {
|
||||||
|
groupVal := attrVal.Group()
|
||||||
|
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||||
|
for _, attr := range groupVal {
|
||||||
|
grpKVs = attrToKVs(attr, grpKVs)
|
||||||
|
}
|
||||||
|
if attr.Key == "" {
|
||||||
|
// slog says we have to inline these
|
||||||
|
kvList = append(kvList, grpKVs...)
|
||||||
|
} else {
|
||||||
|
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
|
||||||
|
}
|
||||||
|
} else if attr.Key != "" {
|
||||||
|
kvList = append(kvList, attr.Key, attrVal.Any())
|
||||||
|
}
|
||||||
|
|
||||||
|
return kvList
|
||||||
|
}
|
||||||
|
|
||||||
|
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||||
|
// It ensures that the result is >= 0. This is necessary because the result is
|
||||||
|
// passed to a LogSink and that API did not historically document whether
|
||||||
|
// levels could be negative or what that meant.
|
||||||
|
//
|
||||||
|
// Some example usage:
|
||||||
|
//
|
||||||
|
// logrV0 := getMyLogger()
|
||||||
|
// logrV2 := logrV0.V(2)
|
||||||
|
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||||
|
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||||
|
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||||
|
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||||
|
func (l fnlogger) levelFromSlog(level slog.Level) int {
|
||||||
|
result := -level
|
||||||
|
if result < 0 {
|
||||||
|
result = 0 // because LogSink doesn't expect negative V levels
|
||||||
|
}
|
||||||
|
return int(result)
|
||||||
|
}
|
|
@ -207,10 +207,6 @@ limitations under the License.
|
||||||
// those.
|
// those.
|
||||||
package logr
|
package logr
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
)
|
|
||||||
|
|
||||||
// New returns a new Logger instance. This is primarily used by libraries
|
// New returns a new Logger instance. This is primarily used by libraries
|
||||||
// implementing LogSink, rather than end users. Passing a nil sink will create
|
// implementing LogSink, rather than end users. Passing a nil sink will create
|
||||||
// a Logger which discards all log lines.
|
// a Logger which discards all log lines.
|
||||||
|
@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
|
||||||
return l.sink == nil
|
return l.sink == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// contextKey is how we find Loggers in a context.Context.
|
|
||||||
type contextKey struct{}
|
|
||||||
|
|
||||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
|
||||||
func FromContext(ctx context.Context) (Logger, error) {
|
|
||||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return Logger{}, notFoundError{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// notFoundError exists to carry an IsNotFound method.
|
|
||||||
type notFoundError struct{}
|
|
||||||
|
|
||||||
func (notFoundError) Error() string {
|
|
||||||
return "no logr.Logger was present"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (notFoundError) IsNotFound() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
|
||||||
// returns a Logger that discards all log messages.
|
|
||||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
|
||||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
return Discard()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewContext returns a new Context, derived from ctx, which carries the
|
|
||||||
// provided Logger.
|
|
||||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
|
||||||
return context.WithValue(ctx, contextKey{}, logger)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RuntimeInfo holds information that the logr "core" library knows which
|
// RuntimeInfo holds information that the logr "core" library knows which
|
||||||
// LogSinks might want to know.
|
// LogSinks might want to know.
|
||||||
type RuntimeInfo struct {
|
type RuntimeInfo struct {
|
||||||
|
|
|
@ -0,0 +1,192 @@
|
||||||
|
//go:build go1.21
|
||||||
|
// +build go1.21
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The logr Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package logr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log/slog"
|
||||||
|
)
|
||||||
|
|
||||||
|
type slogHandler struct {
|
||||||
|
// May be nil, in which case all logs get discarded.
|
||||||
|
sink LogSink
|
||||||
|
// Non-nil if sink is non-nil and implements SlogSink.
|
||||||
|
slogSink SlogSink
|
||||||
|
|
||||||
|
// groupPrefix collects values from WithGroup calls. It gets added as
|
||||||
|
// prefix to value keys when handling a log record.
|
||||||
|
groupPrefix string
|
||||||
|
|
||||||
|
// levelBias can be set when constructing the handler to influence the
|
||||||
|
// slog.Level of log records. A positive levelBias reduces the
|
||||||
|
// slog.Level value. slog has no API to influence this value after the
|
||||||
|
// handler got created, so it can only be set indirectly through
|
||||||
|
// Logger.V.
|
||||||
|
levelBias slog.Level
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ slog.Handler = &slogHandler{}
|
||||||
|
|
||||||
|
// groupSeparator is used to concatenate WithGroup names and attribute keys.
|
||||||
|
const groupSeparator = "."
|
||||||
|
|
||||||
|
// GetLevel is used for black box unit testing.
|
||||||
|
func (l *slogHandler) GetLevel() slog.Level {
|
||||||
|
return l.levelBias
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
|
||||||
|
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
|
||||||
|
if l.slogSink != nil {
|
||||||
|
// Only adjust verbosity level of log entries < slog.LevelError.
|
||||||
|
if record.Level < slog.LevelError {
|
||||||
|
record.Level -= l.levelBias
|
||||||
|
}
|
||||||
|
return l.slogSink.Handle(ctx, record)
|
||||||
|
}
|
||||||
|
|
||||||
|
// No need to check for nil sink here because Handle will only be called
|
||||||
|
// when Enabled returned true.
|
||||||
|
|
||||||
|
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||||
|
record.Attrs(func(attr slog.Attr) bool {
|
||||||
|
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
if record.Level >= slog.LevelError {
|
||||||
|
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
|
||||||
|
} else {
|
||||||
|
level := l.levelFromSlog(record.Level)
|
||||||
|
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
|
||||||
|
// are called by Handle, code in slog gets skipped.
|
||||||
|
//
|
||||||
|
// This offset currently (Go 1.21.0) works for calls through
|
||||||
|
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
|
||||||
|
// chain won't change. Wrapping the handler will also break unwinding. It's
|
||||||
|
// still better than not adjusting at all....
|
||||||
|
//
|
||||||
|
// This cannot be done when constructing the handler because FromSlogHandler needs
|
||||||
|
// access to the original sink without this adjustment. A second copy would
|
||||||
|
// work, but then WithAttrs would have to be called for both of them.
|
||||||
|
func (l *slogHandler) sinkWithCallDepth() LogSink {
|
||||||
|
if sink, ok := l.sink.(CallDepthLogSink); ok {
|
||||||
|
return sink.WithCallDepth(2)
|
||||||
|
}
|
||||||
|
return l.sink
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||||
|
if l.sink == nil || len(attrs) == 0 {
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
clone := *l
|
||||||
|
if l.slogSink != nil {
|
||||||
|
clone.slogSink = l.slogSink.WithAttrs(attrs)
|
||||||
|
clone.sink = clone.slogSink
|
||||||
|
} else {
|
||||||
|
kvList := make([]any, 0, 2*len(attrs))
|
||||||
|
for _, attr := range attrs {
|
||||||
|
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||||
|
}
|
||||||
|
clone.sink = l.sink.WithValues(kvList...)
|
||||||
|
}
|
||||||
|
return &clone
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogHandler) WithGroup(name string) slog.Handler {
|
||||||
|
if l.sink == nil {
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
if name == "" {
|
||||||
|
// slog says to inline empty groups
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
clone := *l
|
||||||
|
if l.slogSink != nil {
|
||||||
|
clone.slogSink = l.slogSink.WithGroup(name)
|
||||||
|
clone.sink = clone.slogSink
|
||||||
|
} else {
|
||||||
|
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
|
||||||
|
}
|
||||||
|
return &clone
|
||||||
|
}
|
||||||
|
|
||||||
|
// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
|
||||||
|
// and other details of slog.
|
||||||
|
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
|
||||||
|
attrVal := attr.Value.Resolve()
|
||||||
|
if attrVal.Kind() == slog.KindGroup {
|
||||||
|
groupVal := attrVal.Group()
|
||||||
|
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||||
|
prefix := groupPrefix
|
||||||
|
if attr.Key != "" {
|
||||||
|
prefix = addPrefix(groupPrefix, attr.Key)
|
||||||
|
}
|
||||||
|
for _, attr := range groupVal {
|
||||||
|
grpKVs = attrToKVs(attr, prefix, grpKVs)
|
||||||
|
}
|
||||||
|
kvList = append(kvList, grpKVs...)
|
||||||
|
} else if attr.Key != "" {
|
||||||
|
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
|
||||||
|
}
|
||||||
|
|
||||||
|
return kvList
|
||||||
|
}
|
||||||
|
|
||||||
|
func addPrefix(prefix, name string) string {
|
||||||
|
if prefix == "" {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
if name == "" {
|
||||||
|
return prefix
|
||||||
|
}
|
||||||
|
return prefix + groupSeparator + name
|
||||||
|
}
|
||||||
|
|
||||||
|
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||||
|
// It ensures that the result is >= 0. This is necessary because the result is
|
||||||
|
// passed to a LogSink and that API did not historically document whether
|
||||||
|
// levels could be negative or what that meant.
|
||||||
|
//
|
||||||
|
// Some example usage:
|
||||||
|
//
|
||||||
|
// logrV0 := getMyLogger()
|
||||||
|
// logrV2 := logrV0.V(2)
|
||||||
|
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||||
|
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||||
|
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||||
|
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||||
|
func (l *slogHandler) levelFromSlog(level slog.Level) int {
|
||||||
|
result := -level
|
||||||
|
result += l.levelBias // in case the original Logger had a V level
|
||||||
|
if result < 0 {
|
||||||
|
result = 0 // because LogSink doesn't expect negative V levels
|
||||||
|
}
|
||||||
|
return int(result)
|
||||||
|
}
|
|
@ -0,0 +1,100 @@
|
||||||
|
//go:build go1.21
|
||||||
|
// +build go1.21
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The logr Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package logr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log/slog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FromSlogHandler returns a Logger which writes to the slog.Handler.
|
||||||
|
//
|
||||||
|
// The logr verbosity level is mapped to slog levels such that V(0) becomes
|
||||||
|
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
|
||||||
|
func FromSlogHandler(handler slog.Handler) Logger {
|
||||||
|
if handler, ok := handler.(*slogHandler); ok {
|
||||||
|
if handler.sink == nil {
|
||||||
|
return Discard()
|
||||||
|
}
|
||||||
|
return New(handler.sink).V(int(handler.levelBias))
|
||||||
|
}
|
||||||
|
return New(&slogSink{handler: handler})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
|
||||||
|
//
|
||||||
|
// The returned logger writes all records with level >= slog.LevelError as
|
||||||
|
// error log entries with LogSink.Error, regardless of the verbosity level of
|
||||||
|
// the Logger:
|
||||||
|
//
|
||||||
|
// logger := <some Logger with 0 as verbosity level>
|
||||||
|
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
|
||||||
|
//
|
||||||
|
// The level of all other records gets reduced by the verbosity
|
||||||
|
// level of the Logger and the result is negated. If it happens
|
||||||
|
// to be negative, then it gets replaced by zero because a LogSink
|
||||||
|
// is not expected to handled negative levels:
|
||||||
|
//
|
||||||
|
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
|
||||||
|
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
|
||||||
|
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
|
||||||
|
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
|
||||||
|
func ToSlogHandler(logger Logger) slog.Handler {
|
||||||
|
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
|
||||||
|
return sink.handler
|
||||||
|
}
|
||||||
|
|
||||||
|
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
|
||||||
|
if slogSink, ok := handler.sink.(SlogSink); ok {
|
||||||
|
handler.slogSink = slogSink
|
||||||
|
}
|
||||||
|
return handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// SlogSink is an optional interface that a LogSink can implement to support
|
||||||
|
// logging through the slog.Logger or slog.Handler APIs better. It then should
|
||||||
|
// also support special slog values like slog.Group. When used as a
|
||||||
|
// slog.Handler, the advantages are:
|
||||||
|
//
|
||||||
|
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
|
||||||
|
// as intended by slog
|
||||||
|
// - proper grouping of key/value pairs via WithGroup
|
||||||
|
// - verbosity levels > slog.LevelInfo can be recorded
|
||||||
|
// - less overhead
|
||||||
|
//
|
||||||
|
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
|
||||||
|
// well. Developers can pick whatever API suits them better and/or mix
|
||||||
|
// packages which use either API in the same binary with a common logging
|
||||||
|
// implementation.
|
||||||
|
//
|
||||||
|
// This interface is necessary because the type implementing the LogSink
|
||||||
|
// interface cannot also implement the slog.Handler interface due to the
|
||||||
|
// different prototype of the common Enabled method.
|
||||||
|
//
|
||||||
|
// An implementation could support both interfaces in two different types, but then
|
||||||
|
// additional interfaces would be needed to convert between those types in FromSlogHandler
|
||||||
|
// and ToSlogHandler.
|
||||||
|
type SlogSink interface {
|
||||||
|
LogSink
|
||||||
|
|
||||||
|
Handle(ctx context.Context, record slog.Record) error
|
||||||
|
WithAttrs(attrs []slog.Attr) SlogSink
|
||||||
|
WithGroup(name string) SlogSink
|
||||||
|
}
|
|
@ -0,0 +1,120 @@
|
||||||
|
//go:build go1.21
|
||||||
|
// +build go1.21
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The logr Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package logr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log/slog"
|
||||||
|
"runtime"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ LogSink = &slogSink{}
|
||||||
|
_ CallDepthLogSink = &slogSink{}
|
||||||
|
_ Underlier = &slogSink{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Underlier is implemented by the LogSink returned by NewFromLogHandler.
|
||||||
|
type Underlier interface {
|
||||||
|
// GetUnderlying returns the Handler used by the LogSink.
|
||||||
|
GetUnderlying() slog.Handler
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// nameKey is used to log the `WithName` values as an additional attribute.
|
||||||
|
nameKey = "logger"
|
||||||
|
|
||||||
|
// errKey is used to log the error parameter of Error as an additional attribute.
|
||||||
|
errKey = "err"
|
||||||
|
)
|
||||||
|
|
||||||
|
type slogSink struct {
|
||||||
|
callDepth int
|
||||||
|
name string
|
||||||
|
handler slog.Handler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogSink) Init(info RuntimeInfo) {
|
||||||
|
l.callDepth = info.CallDepth
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogSink) GetUnderlying() slog.Handler {
|
||||||
|
return l.handler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogSink) WithCallDepth(depth int) LogSink {
|
||||||
|
newLogger := *l
|
||||||
|
newLogger.callDepth += depth
|
||||||
|
return &newLogger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogSink) Enabled(level int) bool {
|
||||||
|
return l.handler.Enabled(context.Background(), slog.Level(-level))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
|
||||||
|
l.log(nil, msg, slog.Level(-level), kvList...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
|
||||||
|
l.log(err, msg, slog.LevelError, kvList...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
|
||||||
|
var pcs [1]uintptr
|
||||||
|
// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
|
||||||
|
runtime.Callers(3+l.callDepth, pcs[:])
|
||||||
|
|
||||||
|
record := slog.NewRecord(time.Now(), level, msg, pcs[0])
|
||||||
|
if l.name != "" {
|
||||||
|
record.AddAttrs(slog.String(nameKey, l.name))
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
record.AddAttrs(slog.Any(errKey, err))
|
||||||
|
}
|
||||||
|
record.Add(kvList...)
|
||||||
|
_ = l.handler.Handle(context.Background(), record)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l slogSink) WithName(name string) LogSink {
|
||||||
|
if l.name != "" {
|
||||||
|
l.name += "/"
|
||||||
|
}
|
||||||
|
l.name += name
|
||||||
|
return &l
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
|
||||||
|
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
|
||||||
|
return &l
|
||||||
|
}
|
||||||
|
|
||||||
|
func kvListToAttrs(kvList ...interface{}) []slog.Attr {
|
||||||
|
// We don't need the record itself, only its Add method.
|
||||||
|
record := slog.NewRecord(time.Time{}, 0, "", 0)
|
||||||
|
record.Add(kvList...)
|
||||||
|
attrs := make([]slog.Attr, 0, record.NumAttrs())
|
||||||
|
record.Attrs(func(attr slog.Attr) bool {
|
||||||
|
attrs = append(attrs, attr)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
return attrs
|
||||||
|
}
|
|
@ -0,0 +1,27 @@
|
||||||
|
Copyright (c) 2015, Gengo, Inc.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
* Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
* Neither the name of Gengo, Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from this
|
||||||
|
software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||||
|
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
35
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
generated
vendored
Normal file
35
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||||
|
|
||||||
|
package(default_visibility = ["//visibility:public"])
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "httprule",
|
||||||
|
srcs = [
|
||||||
|
"compile.go",
|
||||||
|
"parse.go",
|
||||||
|
"types.go",
|
||||||
|
],
|
||||||
|
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
|
||||||
|
deps = ["//utilities"],
|
||||||
|
)
|
||||||
|
|
||||||
|
go_test(
|
||||||
|
name = "httprule_test",
|
||||||
|
size = "small",
|
||||||
|
srcs = [
|
||||||
|
"compile_test.go",
|
||||||
|
"parse_test.go",
|
||||||
|
"types_test.go",
|
||||||
|
],
|
||||||
|
embed = [":httprule"],
|
||||||
|
deps = [
|
||||||
|
"//utilities",
|
||||||
|
"@com_github_golang_glog//:glog",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
alias(
|
||||||
|
name = "go_default_library",
|
||||||
|
actual = ":httprule",
|
||||||
|
visibility = ["//:__subpackages__"],
|
||||||
|
)
|
121
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
generated
vendored
Normal file
121
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
||||||
|
package httprule
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
opcodeVersion = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// Template is a compiled representation of path templates.
|
||||||
|
type Template struct {
|
||||||
|
// Version is the version number of the format.
|
||||||
|
Version int
|
||||||
|
// OpCodes is a sequence of operations.
|
||||||
|
OpCodes []int
|
||||||
|
// Pool is a constant pool
|
||||||
|
Pool []string
|
||||||
|
// Verb is a VERB part in the template.
|
||||||
|
Verb string
|
||||||
|
// Fields is a list of field paths bound in this template.
|
||||||
|
Fields []string
|
||||||
|
// Original template (example: /v1/a_bit_of_everything)
|
||||||
|
Template string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compiler compiles utilities representation of path templates into marshallable operations.
|
||||||
|
// They can be unmarshalled by runtime.NewPattern.
|
||||||
|
type Compiler interface {
|
||||||
|
Compile() Template
|
||||||
|
}
|
||||||
|
|
||||||
|
type op struct {
|
||||||
|
// code is the opcode of the operation
|
||||||
|
code utilities.OpCode
|
||||||
|
|
||||||
|
// str is a string operand of the code.
|
||||||
|
// num is ignored if str is not empty.
|
||||||
|
str string
|
||||||
|
|
||||||
|
// num is a numeric operand of the code.
|
||||||
|
num int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w wildcard) compile() []op {
|
||||||
|
return []op{
|
||||||
|
{code: utilities.OpPush},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w deepWildcard) compile() []op {
|
||||||
|
return []op{
|
||||||
|
{code: utilities.OpPushM},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l literal) compile() []op {
|
||||||
|
return []op{
|
||||||
|
{
|
||||||
|
code: utilities.OpLitPush,
|
||||||
|
str: string(l),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v variable) compile() []op {
|
||||||
|
var ops []op
|
||||||
|
for _, s := range v.segments {
|
||||||
|
ops = append(ops, s.compile()...)
|
||||||
|
}
|
||||||
|
ops = append(ops, op{
|
||||||
|
code: utilities.OpConcatN,
|
||||||
|
num: len(v.segments),
|
||||||
|
}, op{
|
||||||
|
code: utilities.OpCapture,
|
||||||
|
str: v.path,
|
||||||
|
})
|
||||||
|
|
||||||
|
return ops
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t template) Compile() Template {
|
||||||
|
var rawOps []op
|
||||||
|
for _, s := range t.segments {
|
||||||
|
rawOps = append(rawOps, s.compile()...)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ops []int
|
||||||
|
pool []string
|
||||||
|
fields []string
|
||||||
|
)
|
||||||
|
consts := make(map[string]int)
|
||||||
|
for _, op := range rawOps {
|
||||||
|
ops = append(ops, int(op.code))
|
||||||
|
if op.str == "" {
|
||||||
|
ops = append(ops, op.num)
|
||||||
|
} else {
|
||||||
|
// eof segment literal represents the "/" path pattern
|
||||||
|
if op.str == eof {
|
||||||
|
op.str = ""
|
||||||
|
}
|
||||||
|
if _, ok := consts[op.str]; !ok {
|
||||||
|
consts[op.str] = len(pool)
|
||||||
|
pool = append(pool, op.str)
|
||||||
|
}
|
||||||
|
ops = append(ops, consts[op.str])
|
||||||
|
}
|
||||||
|
if op.code == utilities.OpCapture {
|
||||||
|
fields = append(fields, op.str)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Template{
|
||||||
|
Version: opcodeVersion,
|
||||||
|
OpCodes: ops,
|
||||||
|
Pool: pool,
|
||||||
|
Verb: t.verb,
|
||||||
|
Fields: fields,
|
||||||
|
Template: t.template,
|
||||||
|
}
|
||||||
|
}
|
11
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
generated
vendored
Normal file
11
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
//go:build gofuzz
|
||||||
|
// +build gofuzz
|
||||||
|
|
||||||
|
package httprule
|
||||||
|
|
||||||
|
func Fuzz(data []byte) int {
|
||||||
|
if _, err := Parse(string(data)); err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
368
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
generated
vendored
Normal file
368
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
generated
vendored
Normal file
|
@ -0,0 +1,368 @@
|
||||||
|
package httprule
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InvalidTemplateError indicates that the path template is not valid.
|
||||||
|
type InvalidTemplateError struct {
|
||||||
|
tmpl string
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e InvalidTemplateError) Error() string {
|
||||||
|
return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse parses the string representation of path template
|
||||||
|
func Parse(tmpl string) (Compiler, error) {
|
||||||
|
if !strings.HasPrefix(tmpl, "/") {
|
||||||
|
return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
|
||||||
|
}
|
||||||
|
tokens, verb := tokenize(tmpl[1:])
|
||||||
|
|
||||||
|
p := parser{tokens: tokens}
|
||||||
|
segs, err := p.topLevelSegments()
|
||||||
|
if err != nil {
|
||||||
|
return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
|
||||||
|
}
|
||||||
|
|
||||||
|
return template{
|
||||||
|
segments: segs,
|
||||||
|
verb: verb,
|
||||||
|
template: tmpl,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func tokenize(path string) (tokens []string, verb string) {
|
||||||
|
if path == "" {
|
||||||
|
return []string{eof}, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
init = iota
|
||||||
|
field
|
||||||
|
nested
|
||||||
|
)
|
||||||
|
st := init
|
||||||
|
for path != "" {
|
||||||
|
var idx int
|
||||||
|
switch st {
|
||||||
|
case init:
|
||||||
|
idx = strings.IndexAny(path, "/{")
|
||||||
|
case field:
|
||||||
|
idx = strings.IndexAny(path, ".=}")
|
||||||
|
case nested:
|
||||||
|
idx = strings.IndexAny(path, "/}")
|
||||||
|
}
|
||||||
|
if idx < 0 {
|
||||||
|
tokens = append(tokens, path)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
switch r := path[idx]; r {
|
||||||
|
case '/', '.':
|
||||||
|
case '{':
|
||||||
|
st = field
|
||||||
|
case '=':
|
||||||
|
st = nested
|
||||||
|
case '}':
|
||||||
|
st = init
|
||||||
|
}
|
||||||
|
if idx == 0 {
|
||||||
|
tokens = append(tokens, path[idx:idx+1])
|
||||||
|
} else {
|
||||||
|
tokens = append(tokens, path[:idx], path[idx:idx+1])
|
||||||
|
}
|
||||||
|
path = path[idx+1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
l := len(tokens)
|
||||||
|
// See
|
||||||
|
// https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
|
||||||
|
// although normal and backwards-compat logic here is to use the last index
|
||||||
|
// of a colon, if the final segment is a variable followed by a colon, the
|
||||||
|
// part following the colon must be a verb. Hence if the previous token is
|
||||||
|
// an end var marker, we switch the index we're looking for to Index instead
|
||||||
|
// of LastIndex, so that we correctly grab the remaining part of the path as
|
||||||
|
// the verb.
|
||||||
|
var penultimateTokenIsEndVar bool
|
||||||
|
switch l {
|
||||||
|
case 0, 1:
|
||||||
|
// Not enough to be variable so skip this logic and don't result in an
|
||||||
|
// invalid index
|
||||||
|
default:
|
||||||
|
penultimateTokenIsEndVar = tokens[l-2] == "}"
|
||||||
|
}
|
||||||
|
t := tokens[l-1]
|
||||||
|
var idx int
|
||||||
|
if penultimateTokenIsEndVar {
|
||||||
|
idx = strings.Index(t, ":")
|
||||||
|
} else {
|
||||||
|
idx = strings.LastIndex(t, ":")
|
||||||
|
}
|
||||||
|
if idx == 0 {
|
||||||
|
tokens, verb = tokens[:l-1], t[1:]
|
||||||
|
} else if idx > 0 {
|
||||||
|
tokens[l-1], verb = t[:idx], t[idx+1:]
|
||||||
|
}
|
||||||
|
tokens = append(tokens, eof)
|
||||||
|
return tokens, verb
|
||||||
|
}
|
||||||
|
|
||||||
|
// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
|
||||||
|
type parser struct {
|
||||||
|
tokens []string
|
||||||
|
accepted []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// topLevelSegments is the target of this parser.
|
||||||
|
func (p *parser) topLevelSegments() ([]segment, error) {
|
||||||
|
if _, err := p.accept(typeEOF); err == nil {
|
||||||
|
p.tokens = p.tokens[:0]
|
||||||
|
return []segment{literal(eof)}, nil
|
||||||
|
}
|
||||||
|
segs, err := p.segments()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if _, err := p.accept(typeEOF); err != nil {
|
||||||
|
return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
|
||||||
|
}
|
||||||
|
return segs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) segments() ([]segment, error) {
|
||||||
|
s, err := p.segment()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
segs := []segment{s}
|
||||||
|
for {
|
||||||
|
if _, err := p.accept("/"); err != nil {
|
||||||
|
return segs, nil
|
||||||
|
}
|
||||||
|
s, err := p.segment()
|
||||||
|
if err != nil {
|
||||||
|
return segs, err
|
||||||
|
}
|
||||||
|
segs = append(segs, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) segment() (segment, error) {
|
||||||
|
if _, err := p.accept("*"); err == nil {
|
||||||
|
return wildcard{}, nil
|
||||||
|
}
|
||||||
|
if _, err := p.accept("**"); err == nil {
|
||||||
|
return deepWildcard{}, nil
|
||||||
|
}
|
||||||
|
if l, err := p.literal(); err == nil {
|
||||||
|
return l, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
v, err := p.variable()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err)
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) literal() (segment, error) {
|
||||||
|
lit, err := p.accept(typeLiteral)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return literal(lit), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) variable() (segment, error) {
|
||||||
|
if _, err := p.accept("{"); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
path, err := p.fieldPath()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var segs []segment
|
||||||
|
if _, err := p.accept("="); err == nil {
|
||||||
|
segs, err = p.segments()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
segs = []segment{wildcard{}}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := p.accept("}"); err != nil {
|
||||||
|
return nil, fmt.Errorf("unterminated variable segment: %s", path)
|
||||||
|
}
|
||||||
|
return variable{
|
||||||
|
path: path,
|
||||||
|
segments: segs,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) fieldPath() (string, error) {
|
||||||
|
c, err := p.accept(typeIdent)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
components := []string{c}
|
||||||
|
for {
|
||||||
|
if _, err := p.accept("."); err != nil {
|
||||||
|
return strings.Join(components, "."), nil
|
||||||
|
}
|
||||||
|
c, err := p.accept(typeIdent)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("invalid field path component: %w", err)
|
||||||
|
}
|
||||||
|
components = append(components, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A termType is a type of terminal symbols.
|
||||||
|
type termType string
|
||||||
|
|
||||||
|
// These constants define some of valid values of termType.
|
||||||
|
// They improve readability of parse functions.
|
||||||
|
//
|
||||||
|
// You can also use "/", "*", "**", "." or "=" as valid values.
|
||||||
|
const (
|
||||||
|
typeIdent = termType("ident")
|
||||||
|
typeLiteral = termType("literal")
|
||||||
|
typeEOF = termType("$")
|
||||||
|
)
|
||||||
|
|
||||||
|
// eof is the terminal symbol which always appears at the end of token sequence.
|
||||||
|
const eof = "\u0000"
|
||||||
|
|
||||||
|
// accept tries to accept a token in "p".
|
||||||
|
// This function consumes a token and returns it if it matches to the specified "term".
|
||||||
|
// If it doesn't match, the function does not consume any tokens and return an error.
|
||||||
|
func (p *parser) accept(term termType) (string, error) {
|
||||||
|
t := p.tokens[0]
|
||||||
|
switch term {
|
||||||
|
case "/", "*", "**", ".", "=", "{", "}":
|
||||||
|
if t != string(term) && t != "/" {
|
||||||
|
return "", fmt.Errorf("expected %q but got %q", term, t)
|
||||||
|
}
|
||||||
|
case typeEOF:
|
||||||
|
if t != eof {
|
||||||
|
return "", fmt.Errorf("expected EOF but got %q", t)
|
||||||
|
}
|
||||||
|
case typeIdent:
|
||||||
|
if err := expectIdent(t); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
case typeLiteral:
|
||||||
|
if err := expectPChars(t); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return "", fmt.Errorf("unknown termType %q", term)
|
||||||
|
}
|
||||||
|
p.tokens = p.tokens[1:]
|
||||||
|
p.accepted = append(p.accepted, t)
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
|
||||||
|
//
|
||||||
|
// https://www.ietf.org/rfc/rfc3986.txt, P.49
|
||||||
|
//
|
||||||
|
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||||
|
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||||
|
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||||
|
// / "*" / "+" / "," / ";" / "="
|
||||||
|
// pct-encoded = "%" HEXDIG HEXDIG
|
||||||
|
func expectPChars(t string) error {
|
||||||
|
const (
|
||||||
|
init = iota
|
||||||
|
pct1
|
||||||
|
pct2
|
||||||
|
)
|
||||||
|
st := init
|
||||||
|
for _, r := range t {
|
||||||
|
if st != init {
|
||||||
|
if !isHexDigit(r) {
|
||||||
|
return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
|
||||||
|
}
|
||||||
|
switch st {
|
||||||
|
case pct1:
|
||||||
|
st = pct2
|
||||||
|
case pct2:
|
||||||
|
st = init
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// unreserved
|
||||||
|
switch {
|
||||||
|
case 'A' <= r && r <= 'Z':
|
||||||
|
continue
|
||||||
|
case 'a' <= r && r <= 'z':
|
||||||
|
continue
|
||||||
|
case '0' <= r && r <= '9':
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch r {
|
||||||
|
case '-', '.', '_', '~':
|
||||||
|
// unreserved
|
||||||
|
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
|
||||||
|
// sub-delims
|
||||||
|
case ':', '@':
|
||||||
|
// rest of pchar
|
||||||
|
case '%':
|
||||||
|
// pct-encoded
|
||||||
|
st = pct1
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if st != init {
|
||||||
|
return fmt.Errorf("invalid percent-encoding in %q", t)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// expectIdent determines if "ident" is a valid identifier in .proto schema
// ([[:alpha:]_][[:alphanum:]_]*).
func expectIdent(ident string) error {
	if ident == "" {
		return errors.New("empty identifier")
	}
	for pos, r := range ident {
		isDigit := '0' <= r && r <= '9'
		isAlpha := ('A' <= r && r <= 'Z') || ('a' <= r && r <= 'z')
		if isDigit {
			// Digits are fine anywhere except the first position.
			if pos == 0 {
				return fmt.Errorf("identifier starting with digit: %s", ident)
			}
			continue
		}
		if !isAlpha && r != '_' {
			return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// isHexDigit reports whether r is an ASCII hexadecimal digit
// (0-9, A-F or a-f).
func isHexDigit(r rune) bool {
	return ('0' <= r && r <= '9') ||
		('A' <= r && r <= 'F') ||
		('a' <= r && r <= 'f')
}
|
60
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
generated
vendored
Normal file
60
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
generated
vendored
Normal file
|
@ -0,0 +1,60 @@
|
||||||
|
package httprule
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// template is the parsed form of a google.api.http path template,
// e.g. "/v1/{name=projects/*}:verb".
type template struct {
	segments []segment // the "/"-separated path segments
	verb     string    // custom verb following ":" (empty when absent)
	template string    // the original template text
}

// segment is a single path segment of a template. Implementations in this
// package: wildcard, deepWildcard, literal and variable.
type segment interface {
	fmt.Stringer
	// compile lowers the segment into matcher opcodes
	// ("op" is declared elsewhere in this package).
	compile() (ops []op)
}

// wildcard is the "*" path segment.
type wildcard struct{}

// deepWildcard is the "**" path segment.
type deepWildcard struct{}

// literal is a fixed-text path segment.
type literal string

// variable is a "{path=...}" segment that binds what it matches to a field path.
type variable struct {
	path     string    // the field path the captured value is bound to
	segments []segment // the sub-pattern between "=" and "}"
}
|
||||||
|
|
||||||
|
// String returns the template notation for a single-segment wildcard.
func (wildcard) String() string {
	return "*"
}

// String returns the template notation for a multi-segment wildcard.
func (deepWildcard) String() string {
	return "**"
}

// String returns the literal text itself.
func (l literal) String() string {
	return string(l)
}
|
||||||
|
|
||||||
|
func (v variable) String() string {
|
||||||
|
var segs []string
|
||||||
|
for _, s := range v.segments {
|
||||||
|
segs = append(segs, s.String())
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t template) String() string {
|
||||||
|
var segs []string
|
||||||
|
for _, s := range t.segments {
|
||||||
|
segs = append(segs, s.String())
|
||||||
|
}
|
||||||
|
str := strings.Join(segs, "/")
|
||||||
|
if t.verb != "" {
|
||||||
|
str = fmt.Sprintf("%s:%s", str, t.verb)
|
||||||
|
}
|
||||||
|
return "/" + str
|
||||||
|
}
|
97
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
generated
vendored
Normal file
97
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
generated
vendored
Normal file
|
@ -0,0 +1,97 @@
|
||||||
|
# Bazel build definitions for the grpc-gateway v2 "runtime" package
# (vendored; keep in sync with the upstream BUILD file).
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

# The runtime library: HTTP <-> gRPC translation support used by generated gateways.
go_library(
    name = "runtime",
    srcs = [
        "context.go",
        "convert.go",
        "doc.go",
        "errors.go",
        "fieldmask.go",
        "handler.go",
        "marshal_httpbodyproto.go",
        "marshal_json.go",
        "marshal_jsonpb.go",
        "marshal_proto.go",
        "marshaler.go",
        "marshaler_registry.go",
        "mux.go",
        "pattern.go",
        "proto2_convert.go",
        "query.go",
    ],
    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
    deps = [
        "//internal/httprule",
        "//utilities",
        "@go_googleapis//google/api:httpbody_go_proto",
        "@org_golang_google_grpc//codes",
        "@org_golang_google_grpc//grpclog",
        "@org_golang_google_grpc//health/grpc_health_v1",
        "@org_golang_google_grpc//metadata",
        "@org_golang_google_grpc//status",
        "@org_golang_google_protobuf//encoding/protojson",
        "@org_golang_google_protobuf//proto",
        "@org_golang_google_protobuf//reflect/protoreflect",
        "@org_golang_google_protobuf//reflect/protoregistry",
        "@org_golang_google_protobuf//types/known/durationpb",
        "@org_golang_google_protobuf//types/known/fieldmaskpb",
        "@org_golang_google_protobuf//types/known/structpb",
        "@org_golang_google_protobuf//types/known/timestamppb",
        "@org_golang_google_protobuf//types/known/wrapperspb",
    ],
)

# Unit tests for the runtime library.
go_test(
    name = "runtime_test",
    size = "small",
    srcs = [
        "context_test.go",
        "convert_test.go",
        "errors_test.go",
        "fieldmask_test.go",
        "handler_test.go",
        "marshal_httpbodyproto_test.go",
        "marshal_json_test.go",
        "marshal_jsonpb_test.go",
        "marshal_proto_test.go",
        "marshaler_registry_test.go",
        "mux_internal_test.go",
        "mux_test.go",
        "pattern_test.go",
        "query_fuzz_test.go",
        "query_test.go",
    ],
    embed = [":runtime"],
    deps = [
        "//runtime/internal/examplepb",
        "//utilities",
        "@com_github_google_go_cmp//cmp",
        "@com_github_google_go_cmp//cmp/cmpopts",
        "@go_googleapis//google/api:httpbody_go_proto",
        "@go_googleapis//google/rpc:errdetails_go_proto",
        "@go_googleapis//google/rpc:status_go_proto",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//codes",
        "@org_golang_google_grpc//health/grpc_health_v1",
        "@org_golang_google_grpc//metadata",
        "@org_golang_google_grpc//status",
        "@org_golang_google_protobuf//encoding/protojson",
        "@org_golang_google_protobuf//proto",
        "@org_golang_google_protobuf//testing/protocmp",
        "@org_golang_google_protobuf//types/known/durationpb",
        "@org_golang_google_protobuf//types/known/emptypb",
        "@org_golang_google_protobuf//types/known/fieldmaskpb",
        "@org_golang_google_protobuf//types/known/structpb",
        "@org_golang_google_protobuf//types/known/timestamppb",
        "@org_golang_google_protobuf//types/known/wrapperspb",
    ],
)

# Backwards-compatible alias for the historical Go Bazel target name.
alias(
    name = "go_default_library",
    actual = ":runtime",
    visibility = ["//visibility:public"],
)
|
401
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
generated
vendored
Normal file
401
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
generated
vendored
Normal file
|
@ -0,0 +1,401 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/textproto"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetadataHeaderPrefix is the http prefix that represents custom metadata
// parameters to or from a gRPC call.
const MetadataHeaderPrefix = "Grpc-Metadata-"

// MetadataPrefix is prepended to permanent HTTP header keys (as specified
// by the IANA) when added to the gRPC context.
const MetadataPrefix = "grpcgateway-"

// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
// HTTP headers in a response handled by grpc-gateway
const MetadataTrailerPrefix = "Grpc-Trailer-"

// metadataGrpcTimeout is the inbound header whose value is decoded by
// timeoutDecode into a per-request context timeout.
const metadataGrpcTimeout = "Grpc-Timeout"

// metadataHeaderBinarySuffix marks metadata keys whose values are
// base64-encoded binary data (decoded via decodeBinHeader).
const metadataHeaderBinarySuffix = "-Bin"

// Forwarding headers propagated to the backend as lowercase metadata keys.
const xForwardedFor = "X-Forwarded-For"
const xForwardedHost = "X-Forwarded-Host"

// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
// header isn't present. If the value is 0 the sent `context` will not have a timeout.
var DefaultContextTimeout = 0 * time.Second

// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
var malformedHTTPHeaders = map[string]struct{}{
	"connection": {},
}

type (
	// rpcMethodKey and httpPathPatternKey are private context-value keys;
	// see RPCMethod and HTTPPathPattern for the public accessors.
	rpcMethodKey       struct{}
	httpPathPatternKey struct{}

	// AnnotateContextOption transforms the context being annotated for a request.
	AnnotateContextOption func(ctx context.Context) context.Context
)
|
||||||
|
|
||||||
|
// WithHTTPPathPattern returns an AnnotateContextOption that stores the given
// HTTP path pattern in the context; it can be read back with HTTPPathPattern.
func WithHTTPPathPattern(pattern string) AnnotateContextOption {
	return func(ctx context.Context) context.Context {
		return withHTTPPathPattern(ctx, pattern)
	}
}
|
||||||
|
|
||||||
|
// decodeBinHeader base64-decodes a "-Bin" metadata value, accepting both
// padded and unpadded standard encoding.
func decodeBinHeader(v string) ([]byte, error) {
	enc := base64.RawStdEncoding
	if len(v)%4 == 0 {
		// Input was padded, or padding was not necessary.
		enc = base64.StdEncoding
	}
	return enc.DecodeString(v)
}
|
||||||
|
|
||||||
|
/*
AnnotateContext adds context information such as metadata from the request.

At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
except that the forwarded destination is not another HTTP service but rather
a gRPC service.
*/
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
	ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
	if err != nil {
		return nil, err
	}
	// No metadata collected; return the (possibly timeout-wrapped) context as-is.
	if md == nil {
		return ctx, nil
	}

	// Attach the metadata as *outgoing* metadata, ready for the proxied
	// client call to the gRPC backend.
	return metadata.NewOutgoingContext(ctx, md), nil
}
|
||||||
|
|
||||||
|
// AnnotateIncomingContext adds context information such as metadata from the request.
// Attach metadata as incoming context.
func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
	ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
	if err != nil {
		return nil, err
	}
	// No metadata collected; return the (possibly timeout-wrapped) context as-is.
	if md == nil {
		return ctx, nil
	}

	// Unlike AnnotateContext, attach the metadata as *incoming* metadata,
	// as a gRPC server handler would see it.
	return metadata.NewIncomingContext(ctx, md), nil
}
|
||||||
|
|
||||||
|
// isValidGRPCMetadataKey reports whether key is a valid gRPC "Header-Name"
// as defined here:
// https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
// This means 0-9 a-z _ - .
// Only lowercase letters are valid in the wire protocol, but the client
// library will normalize uppercase ASCII to lowercase, so uppercase ASCII is
// also acceptable.
func isValidGRPCMetadataKey(key string) bool {
	// gRPC validates strings on the byte level, not Unicode.
	for i := 0; i < len(key); i++ {
		switch c := key[i]; {
		case c >= 'a' && c <= 'z':
		case c >= 'A' && c <= 'Z':
		case c >= '0' && c <= '9':
		case c == '.', c == '-', c == '_':
		default:
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// isValidGRPCMetadataTextValue reports whether textValue is a valid gRPC
// "ASCII-Value" as defined here:
// https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
// This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive.
func isValidGRPCMetadataTextValue(textValue string) bool {
	// gRPC validates strings on the byte level, not Unicode.
	for i := 0; i < len(textValue); i++ {
		if c := textValue[i]; c < 0x20 || c > 0x7E {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// annotateContext applies the options and any Grpc-Timeout to ctx and builds
// the metadata.MD to attach for the request. It returns a nil MD when there
// is nothing to attach; the returned context must still be used, as it may
// carry a timeout.
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
	ctx = withRPCMethod(ctx, rpcMethodName)
	for _, o := range options {
		ctx = o(ctx)
	}
	// A Grpc-Timeout header overrides the package-level default.
	timeout := DefaultContextTimeout
	if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
		var err error
		timeout, err = timeoutDecode(tm)
		if err != nil {
			return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
		}
	}
	// Collect alternating key/value pairs for metadata.Pairs below.
	var pairs []string
	for key, vals := range req.Header {
		key = textproto.CanonicalMIMEHeaderKey(key)
		for _, val := range vals {
			// For backwards-compatibility, pass through 'authorization' header with no prefix.
			if key == "Authorization" {
				pairs = append(pairs, "authorization", val)
			}
			if h, ok := mux.incomingHeaderMatcher(key); ok {
				if !isValidGRPCMetadataKey(h) {
					grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h)
					continue
				}
				// Handles "-bin" metadata in grpc, since grpc will do another base64
				// encode before sending to server, we need to decode it first.
				// NOTE(review): the suffix is checked on the canonical HTTP key,
				// not on the matcher-produced name h — confirm intended.
				if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
					b, err := decodeBinHeader(val)
					if err != nil {
						return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
					}

					val = string(b)
				} else if !isValidGRPCMetadataTextValue(val) {
					grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h)
					continue
				}
				pairs = append(pairs, h, val)
			}
		}
	}
	// Record the externally visible host for the backend.
	if host := req.Header.Get(xForwardedHost); host != "" {
		pairs = append(pairs, strings.ToLower(xForwardedHost), host)
	} else if req.Host != "" {
		pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
	}

	// Append the peer address to any existing X-Forwarded-For chain.
	if addr := req.RemoteAddr; addr != "" {
		if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
			if fwd := req.Header.Get(xForwardedFor); fwd == "" {
				pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
			} else {
				pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
			}
		}
	}

	if timeout != 0 {
		//nolint:govet // The context outlives this function
		ctx, _ = context.WithTimeout(ctx, timeout)
	}
	if len(pairs) == 0 {
		return ctx, nil, nil
	}
	md := metadata.Pairs(pairs...)
	// Let registered annotators contribute additional metadata.
	for _, mda := range mux.metadataAnnotators {
		md = metadata.Join(md, mda(ctx, req))
	}
	return ctx, md, nil
}
|
||||||
|
|
||||||
|
// ServerMetadata consists of metadata sent from gRPC server.
type ServerMetadata struct {
	HeaderMD  metadata.MD // response header metadata
	TrailerMD metadata.MD // response trailer metadata
}

// serverMetadataKey is the private context key under which a ServerMetadata
// value is stored; see NewServerMetadataContext / ServerMetadataFromContext.
type serverMetadataKey struct{}
|
||||||
|
|
||||||
|
// NewServerMetadataContext creates a new context with ServerMetadata
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
	if ctx == nil {
		// Tolerate a nil context rather than letting WithValue panic.
		ctx = context.Background()
	}
	return context.WithValue(ctx, serverMetadataKey{}, md)
}

// ServerMetadataFromContext returns the ServerMetadata in ctx
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
	if ctx == nil {
		// A nil context cannot carry any value; report absence.
		return md, false
	}
	md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
	return
}
|
||||||
|
|
||||||
|
// ServerTransportStream implements grpc.ServerTransportStream.
// It should only be used by the generated files to support grpc.SendHeader
// outside of gRPC server use.
type ServerTransportStream struct {
	mu      sync.Mutex  // guards header and trailer
	header  metadata.MD // accumulated header metadata
	trailer metadata.MD // accumulated trailer metadata
}
|
||||||
|
|
||||||
|
// Method returns the method for the stream.
// This implementation does not track a method name, so it is always empty.
func (s *ServerTransportStream) Method() string {
	return ""
}

// Header returns the header metadata of the stream.
func (s *ServerTransportStream) Header() metadata.MD {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Return a copy so callers cannot mutate the internal map without the lock.
	return s.header.Copy()
}

// SetHeader sets the header metadata.
// New entries are merged into any previously set header metadata.
func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
	if md.Len() == 0 {
		return nil
	}

	s.mu.Lock()
	s.header = metadata.Join(s.header, md)
	s.mu.Unlock()
	return nil
}

// SendHeader sets the header metadata.
// Here it is equivalent to SetHeader; nothing is transmitted.
func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
	return s.SetHeader(md)
}

// Trailer returns the cached trailer metadata.
func (s *ServerTransportStream) Trailer() metadata.MD {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Return a copy so callers cannot mutate the internal map without the lock.
	return s.trailer.Copy()
}

// SetTrailer sets the trailer metadata.
// New entries are merged into any previously set trailer metadata.
func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
	if md.Len() == 0 {
		return nil
	}

	s.mu.Lock()
	s.trailer = metadata.Join(s.trailer, md)
	s.mu.Unlock()
	return nil
}
|
||||||
|
|
||||||
|
func timeoutDecode(s string) (time.Duration, error) {
|
||||||
|
size := len(s)
|
||||||
|
if size < 2 {
|
||||||
|
return 0, fmt.Errorf("timeout string is too short: %q", s)
|
||||||
|
}
|
||||||
|
d, ok := timeoutUnitToDuration(s[size-1])
|
||||||
|
if !ok {
|
||||||
|
return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
|
||||||
|
}
|
||||||
|
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return d * time.Duration(t), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// timeoutUnitToDuration maps a Grpc-Timeout unit byte to its base duration.
// ok is false (and d is zero) for an unrecognized unit.
func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
	units := map[uint8]time.Duration{
		'H': time.Hour,
		'M': time.Minute,
		'S': time.Second,
		'm': time.Millisecond,
		'u': time.Microsecond,
		'n': time.Nanosecond,
	}
	d, ok = units[u]
	return
}
|
||||||
|
|
||||||
|
// isPermanentHTTPHeader checks whether hdr belongs to the list of
// permanent request headers maintained by IANA.
// http://www.iana.org/assignments/message-headers/message-headers.xml
// The comparison is exact (canonical MIME case expected).
func isPermanentHTTPHeader(hdr string) bool {
	permanentHeaders := [...]string{
		"Accept",
		"Accept-Charset",
		"Accept-Language",
		"Accept-Ranges",
		"Authorization",
		"Cache-Control",
		"Content-Type",
		"Cookie",
		"Date",
		"Expect",
		"From",
		"Host",
		"If-Match",
		"If-Modified-Since",
		"If-None-Match",
		"If-Schedule-Tag-Match",
		"If-Unmodified-Since",
		"Max-Forwards",
		"Origin",
		"Pragma",
		"Referer",
		"User-Agent",
		"Via",
		"Warning",
	}
	for _, p := range permanentHeaders {
		if hdr == p {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// isMalformedHTTPHeader checks whether header belongs to the list of
|
||||||
|
// "malformed headers" and would be rejected by the gRPC server.
|
||||||
|
func isMalformedHTTPHeader(header string) bool {
|
||||||
|
_, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
|
||||||
|
return isMalformed
|
||||||
|
}
|
||||||
|
|
||||||
|
// RPCMethod returns the method string for the server context. The returned
|
||||||
|
// string is in the format of "/package.service/method".
|
||||||
|
func RPCMethod(ctx context.Context) (string, bool) {
|
||||||
|
m := ctx.Value(rpcMethodKey{})
|
||||||
|
if m == nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
ms, ok := m.(string)
|
||||||
|
if !ok {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return ms, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// withRPCMethod stores rpcMethodName in ctx under the private rpcMethodKey;
// it is read back with RPCMethod.
func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
	return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
}
|
||||||
|
|
||||||
|
// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
|
||||||
|
// The format of the returned string is defined by the google.api.http path template type.
|
||||||
|
func HTTPPathPattern(ctx context.Context) (string, bool) {
|
||||||
|
m := ctx.Value(httpPathPatternKey{})
|
||||||
|
if m == nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
ms, ok := m.(string)
|
||||||
|
if !ok {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return ms, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// withHTTPPathPattern stores httpPathPattern in ctx under the private
// httpPathPatternKey; it is read back with HTTPPathPattern.
func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
	return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
}
|
318
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
generated
vendored
Normal file
318
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
generated
vendored
Normal file
|
@ -0,0 +1,318 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/encoding/protojson"
|
||||||
|
"google.golang.org/protobuf/types/known/durationpb"
|
||||||
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
"google.golang.org/protobuf/types/known/wrapperspb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// String just returns the given string.
// It is just for compatibility to other types.
// The returned error is always nil.
func String(val string) (string, error) {
	return val, nil
}

// StringSlice converts 'val' where individual strings are separated by
// 'sep' into a string slice.
// The returned error is always nil.
func StringSlice(val, sep string) ([]string, error) {
	return strings.Split(val, sep), nil
}

// Bool converts the given string representation of a boolean value into bool.
func Bool(val string) (bool, error) {
	return strconv.ParseBool(val)
}
|
||||||
|
|
||||||
|
// BoolSlice converts 'val' where individual booleans are separated by
|
||||||
|
// 'sep' into a bool slice.
|
||||||
|
func BoolSlice(val, sep string) ([]bool, error) {
|
||||||
|
s := strings.Split(val, sep)
|
||||||
|
values := make([]bool, len(s))
|
||||||
|
for i, v := range s {
|
||||||
|
value, err := Bool(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
values[i] = value
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64 converts the given string representation of a floating point number into float64.
func Float64(val string) (float64, error) {
	return strconv.ParseFloat(val, 64)
}

// Float64Slice converts 'val' where individual floating point numbers are separated by
// 'sep' into a float64 slice.
// The first parse failure aborts the whole conversion.
func Float64Slice(val, sep string) ([]float64, error) {
	s := strings.Split(val, sep)
	values := make([]float64, len(s))
	for i, v := range s {
		value, err := Float64(v)
		if err != nil {
			return nil, err
		}
		values[i] = value
	}
	return values, nil
}

// Float32 converts the given string representation of a floating point number into float32.
func Float32(val string) (float32, error) {
	// Parse at 32-bit precision so out-of-range values error here.
	f, err := strconv.ParseFloat(val, 32)
	if err != nil {
		return 0, err
	}
	return float32(f), nil
}

// Float32Slice converts 'val' where individual floating point numbers are separated by
// 'sep' into a float32 slice.
// The first parse failure aborts the whole conversion.
func Float32Slice(val, sep string) ([]float32, error) {
	s := strings.Split(val, sep)
	values := make([]float32, len(s))
	for i, v := range s {
		value, err := Float32(v)
		if err != nil {
			return nil, err
		}
		values[i] = value
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// Int64 converts the given string representation of an integer into int64.
// Base 0 is used: "0x" hex and "0" octal prefixes are accepted (per strconv.ParseInt).
func Int64(val string) (int64, error) {
	return strconv.ParseInt(val, 0, 64)
}

// Int64Slice converts 'val' where individual integers are separated by
// 'sep' into a int64 slice.
// The first parse failure aborts the whole conversion.
func Int64Slice(val, sep string) ([]int64, error) {
	s := strings.Split(val, sep)
	values := make([]int64, len(s))
	for i, v := range s {
		value, err := Int64(v)
		if err != nil {
			return nil, err
		}
		values[i] = value
	}
	return values, nil
}

// Int32 converts the given string representation of an integer into int32.
// Base 0 is used: "0x" hex and "0" octal prefixes are accepted (per strconv.ParseInt).
func Int32(val string) (int32, error) {
	i, err := strconv.ParseInt(val, 0, 32)
	if err != nil {
		return 0, err
	}
	return int32(i), nil
}

// Int32Slice converts 'val' where individual integers are separated by
// 'sep' into a int32 slice.
// The first parse failure aborts the whole conversion.
func Int32Slice(val, sep string) ([]int32, error) {
	s := strings.Split(val, sep)
	values := make([]int32, len(s))
	for i, v := range s {
		value, err := Int32(v)
		if err != nil {
			return nil, err
		}
		values[i] = value
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// Uint64 converts the given string representation of an integer into uint64.
// Base 0 is used: "0x" hex and "0" octal prefixes are accepted (per strconv.ParseUint).
func Uint64(val string) (uint64, error) {
	return strconv.ParseUint(val, 0, 64)
}

// Uint64Slice converts 'val' where individual integers are separated by
// 'sep' into a uint64 slice.
// The first parse failure aborts the whole conversion.
func Uint64Slice(val, sep string) ([]uint64, error) {
	s := strings.Split(val, sep)
	values := make([]uint64, len(s))
	for i, v := range s {
		value, err := Uint64(v)
		if err != nil {
			return nil, err
		}
		values[i] = value
	}
	return values, nil
}

// Uint32 converts the given string representation of an integer into uint32.
// Base 0 is used: "0x" hex and "0" octal prefixes are accepted (per strconv.ParseUint).
func Uint32(val string) (uint32, error) {
	i, err := strconv.ParseUint(val, 0, 32)
	if err != nil {
		return 0, err
	}
	return uint32(i), nil
}

// Uint32Slice converts 'val' where individual integers are separated by
// 'sep' into a uint32 slice.
// The first parse failure aborts the whole conversion.
func Uint32Slice(val, sep string) ([]uint32, error) {
	s := strings.Split(val, sep)
	values := make([]uint32, len(s))
	for i, v := range s {
		value, err := Uint32(v)
		if err != nil {
			return nil, err
		}
		values[i] = value
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// Bytes converts the given string representation of a byte sequence into a slice of bytes
// A bytes sequence is encoded in URL-safe base64 without padding
// (padded standard base64 is also accepted, tried first).
func Bytes(val string) ([]byte, error) {
	b, err := base64.StdEncoding.DecodeString(val)
	if err == nil {
		return b, nil
	}
	// Fall back to the URL-safe alphabet.
	b, err = base64.URLEncoding.DecodeString(val)
	if err != nil {
		return nil, err
	}
	return b, nil
}
|
||||||
|
|
||||||
|
// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
// base64 without padding, are separated by 'sep' into a slice of bytes slices slice.
// The first decode failure aborts the whole conversion.
func BytesSlice(val, sep string) ([][]byte, error) {
	s := strings.Split(val, sep)
	values := make([][]byte, len(s))
	for i, v := range s {
		value, err := Bytes(v)
		if err != nil {
			return nil, err
		}
		values[i] = value
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
|
||||||
|
func Timestamp(val string) (*timestamppb.Timestamp, error) {
|
||||||
|
var r timestamppb.Timestamp
|
||||||
|
val = strconv.Quote(strings.Trim(val, `"`))
|
||||||
|
unmarshaler := &protojson.UnmarshalOptions{}
|
||||||
|
if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Duration converts the given string into a timestamp.Duration.
|
||||||
|
func Duration(val string) (*durationpb.Duration, error) {
|
||||||
|
var r durationpb.Duration
|
||||||
|
val = strconv.Quote(strings.Trim(val, `"`))
|
||||||
|
unmarshaler := &protojson.UnmarshalOptions{}
|
||||||
|
if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enum converts the given string into an int32 that should be type casted into the
|
||||||
|
// correct enum proto type.
|
||||||
|
func Enum(val string, enumValMap map[string]int32) (int32, error) {
|
||||||
|
e, ok := enumValMap[val]
|
||||||
|
if ok {
|
||||||
|
return e, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
i, err := Int32(val)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("%s is not valid", val)
|
||||||
|
}
|
||||||
|
for _, v := range enumValMap {
|
||||||
|
if v == i {
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, fmt.Errorf("%s is not valid", val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnumSlice converts 'val' where individual enums are separated by 'sep'
|
||||||
|
// into a int32 slice. Each individual int32 should be type casted into the
|
||||||
|
// correct enum proto type.
|
||||||
|
func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
|
||||||
|
s := strings.Split(val, sep)
|
||||||
|
values := make([]int32, len(s))
|
||||||
|
for i, v := range s {
|
||||||
|
value, err := Enum(v, enumValMap)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
values[i] = value
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Support for google.protobuf.wrappers on top of primitive types
|
||||||
|
|
||||||
|
// StringValue well-known type support as wrapper around string type
|
||||||
|
func StringValue(val string) (*wrapperspb.StringValue, error) {
|
||||||
|
return wrapperspb.String(val), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FloatValue well-known type support as wrapper around float32 type
|
||||||
|
func FloatValue(val string) (*wrapperspb.FloatValue, error) {
|
||||||
|
parsedVal, err := Float32(val)
|
||||||
|
return wrapperspb.Float(parsedVal), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoubleValue well-known type support as wrapper around float64 type
|
||||||
|
func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
|
||||||
|
parsedVal, err := Float64(val)
|
||||||
|
return wrapperspb.Double(parsedVal), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolValue well-known type support as wrapper around bool type
|
||||||
|
func BoolValue(val string) (*wrapperspb.BoolValue, error) {
|
||||||
|
parsedVal, err := Bool(val)
|
||||||
|
return wrapperspb.Bool(parsedVal), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int32Value well-known type support as wrapper around int32 type
|
||||||
|
func Int32Value(val string) (*wrapperspb.Int32Value, error) {
|
||||||
|
parsedVal, err := Int32(val)
|
||||||
|
return wrapperspb.Int32(parsedVal), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UInt32Value well-known type support as wrapper around uint32 type
|
||||||
|
func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
|
||||||
|
parsedVal, err := Uint32(val)
|
||||||
|
return wrapperspb.UInt32(parsedVal), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64Value well-known type support as wrapper around int64 type
|
||||||
|
func Int64Value(val string) (*wrapperspb.Int64Value, error) {
|
||||||
|
parsedVal, err := Int64(val)
|
||||||
|
return wrapperspb.Int64(parsedVal), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UInt64Value well-known type support as wrapper around uint64 type
|
||||||
|
func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
|
||||||
|
parsedVal, err := Uint64(val)
|
||||||
|
return wrapperspb.UInt64(parsedVal), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesValue well-known type support as wrapper around bytes[] type
|
||||||
|
func BytesValue(val string) (*wrapperspb.BytesValue, error) {
|
||||||
|
parsedVal, err := Bytes(val)
|
||||||
|
return wrapperspb.Bytes(parsedVal), err
|
||||||
|
}
|
|
@ -0,0 +1,5 @@
|
||||||
|
/*
|
||||||
|
Package runtime contains runtime helper functions used by
|
||||||
|
servers which protoc-gen-grpc-gateway generates.
|
||||||
|
*/
|
||||||
|
package runtime
|
181
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
Normal file
181
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,181 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrorHandlerFunc is the signature used to configure error handling.
|
||||||
|
type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
|
||||||
|
|
||||||
|
// StreamErrorHandlerFunc is the signature used to configure stream error handling.
|
||||||
|
type StreamErrorHandlerFunc func(context.Context, error) *status.Status
|
||||||
|
|
||||||
|
// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
|
||||||
|
type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)
|
||||||
|
|
||||||
|
// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
|
||||||
|
// passed to the DefaultRoutingErrorHandler.
|
||||||
|
type HTTPStatusError struct {
|
||||||
|
HTTPStatus int
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *HTTPStatusError) Error() string {
|
||||||
|
return e.Err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
|
||||||
|
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
|
||||||
|
func HTTPStatusFromCode(code codes.Code) int {
|
||||||
|
switch code {
|
||||||
|
case codes.OK:
|
||||||
|
return http.StatusOK
|
||||||
|
case codes.Canceled:
|
||||||
|
return 499
|
||||||
|
case codes.Unknown:
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
case codes.InvalidArgument:
|
||||||
|
return http.StatusBadRequest
|
||||||
|
case codes.DeadlineExceeded:
|
||||||
|
return http.StatusGatewayTimeout
|
||||||
|
case codes.NotFound:
|
||||||
|
return http.StatusNotFound
|
||||||
|
case codes.AlreadyExists:
|
||||||
|
return http.StatusConflict
|
||||||
|
case codes.PermissionDenied:
|
||||||
|
return http.StatusForbidden
|
||||||
|
case codes.Unauthenticated:
|
||||||
|
return http.StatusUnauthorized
|
||||||
|
case codes.ResourceExhausted:
|
||||||
|
return http.StatusTooManyRequests
|
||||||
|
case codes.FailedPrecondition:
|
||||||
|
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
|
||||||
|
return http.StatusBadRequest
|
||||||
|
case codes.Aborted:
|
||||||
|
return http.StatusConflict
|
||||||
|
case codes.OutOfRange:
|
||||||
|
return http.StatusBadRequest
|
||||||
|
case codes.Unimplemented:
|
||||||
|
return http.StatusNotImplemented
|
||||||
|
case codes.Internal:
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
case codes.Unavailable:
|
||||||
|
return http.StatusServiceUnavailable
|
||||||
|
case codes.DataLoss:
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
default:
|
||||||
|
grpclog.Infof("Unknown gRPC error code: %v", code)
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPError uses the mux-configured error handler.
|
||||||
|
func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
|
||||||
|
mux.errorHandler(ctx, mux, marshaler, w, r, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultHTTPErrorHandler is the default error handler.
|
||||||
|
// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
|
||||||
|
// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
|
||||||
|
// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler
|
||||||
|
// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode
|
||||||
|
// are insufficient for.
|
||||||
|
// If otherwise, it replies with http.StatusInternalServerError.
|
||||||
|
//
|
||||||
|
// The response body written by this function is a Status message marshaled by the Marshaler.
|
||||||
|
func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
|
||||||
|
// return Internal when Marshal failed
|
||||||
|
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
|
||||||
|
|
||||||
|
var customStatus *HTTPStatusError
|
||||||
|
if errors.As(err, &customStatus) {
|
||||||
|
err = customStatus.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
s := status.Convert(err)
|
||||||
|
pb := s.Proto()
|
||||||
|
|
||||||
|
w.Header().Del("Trailer")
|
||||||
|
w.Header().Del("Transfer-Encoding")
|
||||||
|
|
||||||
|
contentType := marshaler.ContentType(pb)
|
||||||
|
w.Header().Set("Content-Type", contentType)
|
||||||
|
|
||||||
|
if s.Code() == codes.Unauthenticated {
|
||||||
|
w.Header().Set("WWW-Authenticate", s.Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, merr := marshaler.Marshal(pb)
|
||||||
|
if merr != nil {
|
||||||
|
grpclog.Infof("Failed to marshal error message %q: %v", s, merr)
|
||||||
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
|
if _, err := io.WriteString(w, fallback); err != nil {
|
||||||
|
grpclog.Infof("Failed to write response: %v", err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
md, ok := ServerMetadataFromContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
handleForwardResponseServerMetadata(w, mux, md)
|
||||||
|
|
||||||
|
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||||
|
// Unless the request includes a TE header field indicating "trailers"
|
||||||
|
// is acceptable, as described in Section 4.3, a server SHOULD NOT
|
||||||
|
// generate trailer fields that it believes are necessary for the user
|
||||||
|
// agent to receive.
|
||||||
|
doForwardTrailers := requestAcceptsTrailers(r)
|
||||||
|
|
||||||
|
if doForwardTrailers {
|
||||||
|
handleForwardResponseTrailerHeader(w, md)
|
||||||
|
w.Header().Set("Transfer-Encoding", "chunked")
|
||||||
|
}
|
||||||
|
|
||||||
|
st := HTTPStatusFromCode(s.Code())
|
||||||
|
if customStatus != nil {
|
||||||
|
st = customStatus.HTTPStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(st)
|
||||||
|
if _, err := w.Write(buf); err != nil {
|
||||||
|
grpclog.Infof("Failed to write response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if doForwardTrailers {
|
||||||
|
handleForwardResponseTrailer(w, md)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
|
||||||
|
return status.Convert(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultRoutingErrorHandler is our default handler for routing errors.
|
||||||
|
// By default http error codes mapped on the following error codes:
|
||||||
|
//
|
||||||
|
// NotFound -> grpc.NotFound
|
||||||
|
// StatusBadRequest -> grpc.InvalidArgument
|
||||||
|
// MethodNotAllowed -> grpc.Unimplemented
|
||||||
|
// Other -> grpc.Internal, method is not expecting to be called for anything else
|
||||||
|
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
|
||||||
|
sterr := status.Error(codes.Internal, "Unexpected routing error")
|
||||||
|
switch httpStatus {
|
||||||
|
case http.StatusBadRequest:
|
||||||
|
sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
|
||||||
|
case http.StatusMethodNotAllowed:
|
||||||
|
sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
|
||||||
|
case http.StatusNotFound:
|
||||||
|
sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
|
||||||
|
}
|
||||||
|
mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
|
||||||
|
}
|
166
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
generated
vendored
Normal file
166
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
generated
vendored
Normal file
|
@ -0,0 +1,166 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
|
||||||
|
)
|
||||||
|
|
||||||
|
func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
|
||||||
|
fd := fields.ByName(protoreflect.Name(name))
|
||||||
|
if fd != nil {
|
||||||
|
return fd
|
||||||
|
}
|
||||||
|
|
||||||
|
return fields.ByJSONName(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
|
||||||
|
func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
|
||||||
|
fm := &field_mask.FieldMask{}
|
||||||
|
var root interface{}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(r).Decode(&root); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return fm, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
|
||||||
|
for len(queue) > 0 {
|
||||||
|
// dequeue an item
|
||||||
|
item := queue[0]
|
||||||
|
queue = queue[1:]
|
||||||
|
|
||||||
|
m, ok := item.node.(map[string]interface{})
|
||||||
|
switch {
|
||||||
|
case ok:
|
||||||
|
// if the item is an object, then enqueue all of its children
|
||||||
|
for k, v := range m {
|
||||||
|
if item.msg == nil {
|
||||||
|
return nil, errors.New("JSON structure did not match request type")
|
||||||
|
}
|
||||||
|
|
||||||
|
fd := getFieldByName(item.msg.Descriptor().Fields(), k)
|
||||||
|
if fd == nil {
|
||||||
|
return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
|
||||||
|
}
|
||||||
|
|
||||||
|
if isDynamicProtoMessage(fd.Message()) {
|
||||||
|
for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
|
||||||
|
newPath := p
|
||||||
|
if item.path != "" {
|
||||||
|
newPath = item.path + "." + newPath
|
||||||
|
}
|
||||||
|
queue = append(queue, fieldMaskPathItem{path: newPath})
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if isProtobufAnyMessage(fd.Message()) && !fd.IsList() {
|
||||||
|
_, hasTypeField := v.(map[string]interface{})["@type"]
|
||||||
|
if hasTypeField {
|
||||||
|
queue = append(queue, fieldMaskPathItem{path: k})
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
child := fieldMaskPathItem{
|
||||||
|
node: v,
|
||||||
|
}
|
||||||
|
if item.path == "" {
|
||||||
|
child.path = string(fd.FullName().Name())
|
||||||
|
} else {
|
||||||
|
child.path = item.path + "." + string(fd.FullName().Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case fd.IsList(), fd.IsMap():
|
||||||
|
// As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
|
||||||
|
// Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
|
||||||
|
fm.Paths = append(fm.Paths, child.path)
|
||||||
|
case fd.Message() != nil:
|
||||||
|
child.msg = item.msg.Get(fd).Message()
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
queue = append(queue, child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case len(item.path) > 0:
|
||||||
|
// otherwise, it's a leaf node so print its path
|
||||||
|
fm.Paths = append(fm.Paths, item.path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort for deterministic output in the presence
|
||||||
|
// of repeated fields.
|
||||||
|
sort.Strings(fm.Paths)
|
||||||
|
|
||||||
|
return fm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
|
||||||
|
return md != nil && (md.FullName() == "google.protobuf.Any")
|
||||||
|
}
|
||||||
|
|
||||||
|
func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
|
||||||
|
return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildPathsBlindly does not attempt to match proto field names to the
|
||||||
|
// json value keys. Instead it relies completely on the structure of
|
||||||
|
// the unmarshalled json contained within in.
|
||||||
|
// Returns a slice containing all subpaths with the root at the
|
||||||
|
// passed in name and json value.
|
||||||
|
func buildPathsBlindly(name string, in interface{}) []string {
|
||||||
|
m, ok := in.(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
return []string{name}
|
||||||
|
}
|
||||||
|
|
||||||
|
var paths []string
|
||||||
|
queue := []fieldMaskPathItem{{path: name, node: m}}
|
||||||
|
for len(queue) > 0 {
|
||||||
|
cur := queue[0]
|
||||||
|
queue = queue[1:]
|
||||||
|
|
||||||
|
m, ok := cur.node.(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
// This should never happen since we should always check that we only add
|
||||||
|
// nodes of type map[string]interface{} to the queue.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for k, v := range m {
|
||||||
|
if mi, ok := v.(map[string]interface{}); ok {
|
||||||
|
queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
|
||||||
|
} else {
|
||||||
|
// This is not a struct, so there are no more levels to descend.
|
||||||
|
curPath := cur.path + "." + k
|
||||||
|
paths = append(paths, curPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return paths
|
||||||
|
}
|
||||||
|
|
||||||
|
// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask
|
||||||
|
type fieldMaskPathItem struct {
|
||||||
|
// the list of prior fields leading up to node connected by dots
|
||||||
|
path string
|
||||||
|
|
||||||
|
// a generic decoded json object the current item to inspect for further path extraction
|
||||||
|
node interface{}
|
||||||
|
|
||||||
|
// parent message
|
||||||
|
msg protoreflect.Message
|
||||||
|
}
|
227
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
generated
vendored
Normal file
227
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
generated
vendored
Normal file
|
@ -0,0 +1,227 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/textproto"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"google.golang.org/genproto/googleapis/api/httpbody"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ForwardResponseStream forwards the stream from gRPC server to REST client.
|
||||||
|
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||||
|
f, ok := w.(http.Flusher)
|
||||||
|
if !ok {
|
||||||
|
grpclog.Infof("Flush not supported in %T", w)
|
||||||
|
http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
md, ok := ServerMetadataFromContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||||
|
http.Error(w, "unexpected error", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
handleForwardResponseServerMetadata(w, mux, md)
|
||||||
|
|
||||||
|
w.Header().Set("Transfer-Encoding", "chunked")
|
||||||
|
if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
|
||||||
|
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var delimiter []byte
|
||||||
|
if d, ok := marshaler.(Delimited); ok {
|
||||||
|
delimiter = d.Delimiter()
|
||||||
|
} else {
|
||||||
|
delimiter = []byte("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
var wroteHeader bool
|
||||||
|
for {
|
||||||
|
resp, err := recv()
|
||||||
|
if err == io.EOF {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||||
|
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !wroteHeader {
|
||||||
|
w.Header().Set("Content-Type", marshaler.ContentType(resp))
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf []byte
|
||||||
|
httpBody, isHTTPBody := resp.(*httpbody.HttpBody)
|
||||||
|
switch {
|
||||||
|
case resp == nil:
|
||||||
|
buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
|
||||||
|
case isHTTPBody:
|
||||||
|
buf = httpBody.GetData()
|
||||||
|
default:
|
||||||
|
result := map[string]interface{}{"result": resp}
|
||||||
|
if rb, ok := resp.(responseBody); ok {
|
||||||
|
result["result"] = rb.XXX_ResponseBody()
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, err = marshaler.Marshal(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Infof("Failed to marshal response chunk: %v", err)
|
||||||
|
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err := w.Write(buf); err != nil {
|
||||||
|
grpclog.Infof("Failed to send response chunk: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
wroteHeader = true
|
||||||
|
if _, err := w.Write(delimiter); err != nil {
|
||||||
|
grpclog.Infof("Failed to send delimiter chunk: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f.Flush()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
|
||||||
|
for k, vs := range md.HeaderMD {
|
||||||
|
if h, ok := mux.outgoingHeaderMatcher(k); ok {
|
||||||
|
for _, v := range vs {
|
||||||
|
w.Header().Add(h, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
|
||||||
|
for k := range md.TrailerMD {
|
||||||
|
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
|
||||||
|
w.Header().Add("Trailer", tKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
|
||||||
|
for k, vs := range md.TrailerMD {
|
||||||
|
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
|
||||||
|
for _, v := range vs {
|
||||||
|
w.Header().Add(tKey, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// responseBody interface contains method for getting field for marshaling to the response body
|
||||||
|
// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule`
|
||||||
|
type responseBody interface {
|
||||||
|
XXX_ResponseBody() interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
||||||
|
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||||
|
md, ok := ServerMetadataFromContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
handleForwardResponseServerMetadata(w, mux, md)
|
||||||
|
|
||||||
|
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||||
|
// Unless the request includes a TE header field indicating "trailers"
|
||||||
|
// is acceptable, as described in Section 4.3, a server SHOULD NOT
|
||||||
|
// generate trailer fields that it believes are necessary for the user
|
||||||
|
// agent to receive.
|
||||||
|
doForwardTrailers := requestAcceptsTrailers(req)
|
||||||
|
|
||||||
|
if doForwardTrailers {
|
||||||
|
handleForwardResponseTrailerHeader(w, md)
|
||||||
|
w.Header().Set("Transfer-Encoding", "chunked")
|
||||||
|
}
|
||||||
|
|
||||||
|
handleForwardResponseTrailerHeader(w, md)
|
||||||
|
|
||||||
|
contentType := marshaler.ContentType(resp)
|
||||||
|
w.Header().Set("Content-Type", contentType)
|
||||||
|
|
||||||
|
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||||
|
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var buf []byte
|
||||||
|
var err error
|
||||||
|
if rb, ok := resp.(responseBody); ok {
|
||||||
|
buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
|
||||||
|
} else {
|
||||||
|
buf, err = marshaler.Marshal(resp)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Infof("Marshal error: %v", err)
|
||||||
|
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = w.Write(buf); err != nil {
|
||||||
|
grpclog.Infof("Failed to write response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if doForwardTrailers {
|
||||||
|
handleForwardResponseTrailer(w, md)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func requestAcceptsTrailers(req *http.Request) bool {
|
||||||
|
te := req.Header.Get("TE")
|
||||||
|
return strings.Contains(strings.ToLower(te), "trailers")
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
|
||||||
|
if len(opts) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
if err := opt(ctx, w, resp); err != nil {
|
||||||
|
grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
|
||||||
|
st := mux.streamErrorHandler(ctx, err)
|
||||||
|
msg := errorChunk(st)
|
||||||
|
if !wroteHeader {
|
||||||
|
w.Header().Set("Content-Type", marshaler.ContentType(msg))
|
||||||
|
w.WriteHeader(HTTPStatusFromCode(st.Code()))
|
||||||
|
}
|
||||||
|
buf, err := marshaler.Marshal(msg)
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Infof("Failed to marshal an error: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err := w.Write(buf); err != nil {
|
||||||
|
grpclog.Infof("Failed to notify error to client: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err := w.Write(delimiter); err != nil {
|
||||||
|
grpclog.Infof("Failed to send delimiter chunk: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func errorChunk(st *status.Status) map[string]proto.Message {
|
||||||
|
return map[string]proto.Message{"error": st.Proto()}
|
||||||
|
}
|
32
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
generated
vendored
Normal file
32
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"google.golang.org/genproto/googleapis/api/httpbody"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
|
||||||
|
// google.api.HttpBody message as the full response body if it is
|
||||||
|
// the actual message used as the response. If not, then this will
|
||||||
|
// simply fallback to the Marshaler specified as its default Marshaler.
|
||||||
|
type HTTPBodyMarshaler struct {
|
||||||
|
Marshaler
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentType returns its specified content type in case v is a
|
||||||
|
// google.api.HttpBody message, otherwise it will fall back to the default Marshalers
|
||||||
|
// content type.
|
||||||
|
func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
|
||||||
|
if httpBody, ok := v.(*httpbody.HttpBody); ok {
|
||||||
|
return httpBody.GetContentType()
|
||||||
|
}
|
||||||
|
return h.Marshaler.ContentType(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals "v" by returning the body bytes if v is a
|
||||||
|
// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
|
||||||
|
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
if httpBody, ok := v.(*httpbody.HttpBody); ok {
|
||||||
|
return httpBody.Data, nil
|
||||||
|
}
|
||||||
|
return h.Marshaler.Marshal(v)
|
||||||
|
}
|
45
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
generated
vendored
Normal file
45
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
|
||||||
|
// with the standard "encoding/json" package of Golang.
|
||||||
|
// Although it is generally faster for simple proto messages than JSONPb,
|
||||||
|
// it does not support advanced features of protobuf, e.g. map, oneof, ....
|
||||||
|
//
|
||||||
|
// The NewEncoder and NewDecoder types return *json.Encoder and
|
||||||
|
// *json.Decoder respectively.
|
||||||
|
type JSONBuiltin struct{}
|
||||||
|
|
||||||
|
// ContentType always Returns "application/json".
|
||||||
|
func (*JSONBuiltin) ContentType(_ interface{}) string {
|
||||||
|
return "application/json"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals "v" into JSON
|
||||||
|
func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
return json.Marshal(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals JSON data into "v".
|
||||||
|
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
|
||||||
|
return json.Unmarshal(data, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
||||||
|
func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
|
||||||
|
return json.NewDecoder(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
||||||
|
func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
|
||||||
|
return json.NewEncoder(w)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delimiter for newline encoded JSON streams.
|
||||||
|
func (j *JSONBuiltin) Delimiter() []byte {
|
||||||
|
return []byte("\n")
|
||||||
|
}
|
348
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
generated
vendored
Normal file
348
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
generated
vendored
Normal file
|
@ -0,0 +1,348 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/encoding/protojson"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
|
||||||
|
// with the "google.golang.org/protobuf/encoding/protojson" marshaler.
|
||||||
|
// It supports the full functionality of protobuf unlike JSONBuiltin.
|
||||||
|
//
|
||||||
|
// The NewDecoder method returns a DecoderWrapper, so the underlying
|
||||||
|
// *json.Decoder methods can be used.
|
||||||
|
type JSONPb struct {
|
||||||
|
protojson.MarshalOptions
|
||||||
|
protojson.UnmarshalOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentType always returns "application/json".
|
||||||
|
func (*JSONPb) ContentType(_ interface{}) string {
|
||||||
|
return "application/json"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals "v" into JSON.
|
||||||
|
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
if _, ok := v.(proto.Message); !ok {
|
||||||
|
return j.marshalNonProtoField(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := j.marshalTo(&buf, v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
|
||||||
|
p, ok := v.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
buf, err := j.marshalNonProtoField(v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = w.Write(buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
b, err := j.MarshalOptions.Marshal(p)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = w.Write(b)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// protoMessageType is stored to prevent constant lookup of the same type at runtime.
|
||||||
|
protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
||||||
|
)
|
||||||
|
|
||||||
|
// marshalNonProto marshals a non-message field of a protobuf message.
|
||||||
|
// This function does not correctly marshal arbitrary data structures into JSON,
|
||||||
|
// it is only capable of marshaling non-message field values of protobuf,
|
||||||
|
// i.e. primitive types, enums; pointers to primitives or enums; maps from
|
||||||
|
// integer/string types to primitives/enums/pointers to messages.
|
||||||
|
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
||||||
|
if v == nil {
|
||||||
|
return []byte("null"), nil
|
||||||
|
}
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
for rv.Kind() == reflect.Ptr {
|
||||||
|
if rv.IsNil() {
|
||||||
|
return []byte("null"), nil
|
||||||
|
}
|
||||||
|
rv = rv.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if rv.Kind() == reflect.Slice {
|
||||||
|
if rv.IsNil() {
|
||||||
|
if j.EmitUnpopulated {
|
||||||
|
return []byte("[]"), nil
|
||||||
|
}
|
||||||
|
return []byte("null"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if rv.Type().Elem().Implements(protoMessageType) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := buf.WriteByte('['); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for i := 0; i < rv.Len(); i++ {
|
||||||
|
if i != 0 {
|
||||||
|
if err := buf.WriteByte(','); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := buf.WriteByte(']'); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if rv.Type().Elem().Implements(typeProtoEnum) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := buf.WriteByte('['); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for i := 0; i < rv.Len(); i++ {
|
||||||
|
if i != 0 {
|
||||||
|
if err := buf.WriteByte(','); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if j.UseEnumNumbers {
|
||||||
|
_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
|
||||||
|
} else {
|
||||||
|
_, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := buf.WriteByte(']'); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if rv.Kind() == reflect.Map {
|
||||||
|
m := make(map[string]*json.RawMessage)
|
||||||
|
for _, k := range rv.MapKeys() {
|
||||||
|
buf, err := j.Marshal(rv.MapIndex(k).Interface())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
|
||||||
|
}
|
||||||
|
if j.Indent != "" {
|
||||||
|
return json.MarshalIndent(m, "", j.Indent)
|
||||||
|
}
|
||||||
|
return json.Marshal(m)
|
||||||
|
}
|
||||||
|
if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
|
||||||
|
return json.Marshal(enum.String())
|
||||||
|
}
|
||||||
|
return json.Marshal(rv.Interface())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals JSON "data" into "v"
|
||||||
|
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
|
||||||
|
return unmarshalJSONPb(data, j.UnmarshalOptions, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
||||||
|
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
|
||||||
|
d := json.NewDecoder(r)
|
||||||
|
return DecoderWrapper{
|
||||||
|
Decoder: d,
|
||||||
|
UnmarshalOptions: j.UnmarshalOptions,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecoderWrapper is a wrapper around a *json.Decoder that adds
|
||||||
|
// support for protos to the Decode method.
|
||||||
|
type DecoderWrapper struct {
|
||||||
|
*json.Decoder
|
||||||
|
protojson.UnmarshalOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode wraps the embedded decoder's Decode method to support
|
||||||
|
// protos using a jsonpb.Unmarshaler.
|
||||||
|
func (d DecoderWrapper) Decode(v interface{}) error {
|
||||||
|
return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
||||||
|
func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
|
||||||
|
return EncoderFunc(func(v interface{}) error {
|
||||||
|
if err := j.marshalTo(w, v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// mimic json.Encoder by adding a newline (makes output
|
||||||
|
// easier to read when it contains multiple encoded items)
|
||||||
|
_, err := w.Write(j.Delimiter())
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
|
||||||
|
d := json.NewDecoder(bytes.NewReader(data))
|
||||||
|
return decodeJSONPb(d, unmarshaler, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
|
||||||
|
p, ok := v.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
return decodeNonProtoField(d, unmarshaler, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode into bytes for marshalling
|
||||||
|
var b json.RawMessage
|
||||||
|
if err := d.Decode(&b); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return unmarshaler.Unmarshal([]byte(b), p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.Kind() != reflect.Ptr {
|
||||||
|
return fmt.Errorf("%T is not a pointer", v)
|
||||||
|
}
|
||||||
|
for rv.Kind() == reflect.Ptr {
|
||||||
|
if rv.IsNil() {
|
||||||
|
rv.Set(reflect.New(rv.Type().Elem()))
|
||||||
|
}
|
||||||
|
if rv.Type().ConvertibleTo(typeProtoMessage) {
|
||||||
|
// Decode into bytes for marshalling
|
||||||
|
var b json.RawMessage
|
||||||
|
if err := d.Decode(&b); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
|
||||||
|
}
|
||||||
|
rv = rv.Elem()
|
||||||
|
}
|
||||||
|
if rv.Kind() == reflect.Map {
|
||||||
|
if rv.IsNil() {
|
||||||
|
rv.Set(reflect.MakeMap(rv.Type()))
|
||||||
|
}
|
||||||
|
conv, ok := convFromType[rv.Type().Key().Kind()]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
|
||||||
|
}
|
||||||
|
|
||||||
|
m := make(map[string]*json.RawMessage)
|
||||||
|
if err := d.Decode(&m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for k, v := range m {
|
||||||
|
result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
|
||||||
|
if err := result[1].Interface(); err != nil {
|
||||||
|
return err.(error)
|
||||||
|
}
|
||||||
|
bk := result[0]
|
||||||
|
bv := reflect.New(rv.Type().Elem())
|
||||||
|
if v == nil {
|
||||||
|
null := json.RawMessage("null")
|
||||||
|
v = &null
|
||||||
|
}
|
||||||
|
if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rv.SetMapIndex(bk, bv.Elem())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if rv.Kind() == reflect.Slice {
|
||||||
|
if rv.Type().Elem().Kind() == reflect.Uint8 {
|
||||||
|
var sl []byte
|
||||||
|
if err := d.Decode(&sl); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if sl != nil {
|
||||||
|
rv.SetBytes(sl)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var sl []json.RawMessage
|
||||||
|
if err := d.Decode(&sl); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if sl != nil {
|
||||||
|
rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
|
||||||
|
}
|
||||||
|
for _, item := range sl {
|
||||||
|
bv := reflect.New(rv.Type().Elem())
|
||||||
|
if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rv.Set(reflect.Append(rv, bv.Elem()))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if _, ok := rv.Interface().(protoEnum); ok {
|
||||||
|
var repr interface{}
|
||||||
|
if err := d.Decode(&repr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch v := repr.(type) {
|
||||||
|
case string:
|
||||||
|
// TODO(yugui) Should use proto.StructProperties?
|
||||||
|
return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
|
||||||
|
case float64:
|
||||||
|
rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return d.Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
type protoEnum interface {
|
||||||
|
fmt.Stringer
|
||||||
|
EnumDescriptor() ([]byte, []int)
|
||||||
|
}
|
||||||
|
|
||||||
|
var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
|
||||||
|
|
||||||
|
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
||||||
|
|
||||||
|
// Delimiter for newline encoded JSON streams.
|
||||||
|
func (j *JSONPb) Delimiter() []byte {
|
||||||
|
return []byte("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
convFromType = map[reflect.Kind]reflect.Value{
|
||||||
|
reflect.String: reflect.ValueOf(String),
|
||||||
|
reflect.Bool: reflect.ValueOf(Bool),
|
||||||
|
reflect.Float64: reflect.ValueOf(Float64),
|
||||||
|
reflect.Float32: reflect.ValueOf(Float32),
|
||||||
|
reflect.Int64: reflect.ValueOf(Int64),
|
||||||
|
reflect.Int32: reflect.ValueOf(Int32),
|
||||||
|
reflect.Uint64: reflect.ValueOf(Uint64),
|
||||||
|
reflect.Uint32: reflect.ValueOf(Uint32),
|
||||||
|
reflect.Slice: reflect.ValueOf(Bytes),
|
||||||
|
}
|
||||||
|
)
|
60
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
generated
vendored
Normal file
60
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
generated
vendored
Normal file
|
@ -0,0 +1,60 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes
|
||||||
|
type ProtoMarshaller struct{}
|
||||||
|
|
||||||
|
// ContentType always returns "application/octet-stream".
|
||||||
|
func (*ProtoMarshaller) ContentType(_ interface{}) string {
|
||||||
|
return "application/octet-stream"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals "value" into Proto
|
||||||
|
func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
|
||||||
|
message, ok := value.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("unable to marshal non proto field")
|
||||||
|
}
|
||||||
|
return proto.Marshal(message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals proto "data" into "value"
|
||||||
|
func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
|
||||||
|
message, ok := value.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
return errors.New("unable to unmarshal non proto field")
|
||||||
|
}
|
||||||
|
return proto.Unmarshal(data, message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a Decoder which reads proto stream from "reader".
|
||||||
|
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
|
||||||
|
return DecoderFunc(func(value interface{}) error {
|
||||||
|
buffer, err := io.ReadAll(reader)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return marshaller.Unmarshal(buffer, value)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder returns an Encoder which writes proto stream into "writer".
|
||||||
|
func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
|
||||||
|
return EncoderFunc(func(value interface{}) error {
|
||||||
|
buffer, err := marshaller.Marshal(value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := writer.Write(buffer); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
50
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
generated
vendored
Normal file
50
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
|
||||||
|
type Marshaler interface {
|
||||||
|
// Marshal marshals "v" into byte sequence.
|
||||||
|
Marshal(v interface{}) ([]byte, error)
|
||||||
|
// Unmarshal unmarshals "data" into "v".
|
||||||
|
// "v" must be a pointer value.
|
||||||
|
Unmarshal(data []byte, v interface{}) error
|
||||||
|
// NewDecoder returns a Decoder which reads byte sequence from "r".
|
||||||
|
NewDecoder(r io.Reader) Decoder
|
||||||
|
// NewEncoder returns an Encoder which writes bytes sequence into "w".
|
||||||
|
NewEncoder(w io.Writer) Encoder
|
||||||
|
// ContentType returns the Content-Type which this marshaler is responsible for.
|
||||||
|
// The parameter describes the type which is being marshalled, which can sometimes
|
||||||
|
// affect the content type returned.
|
||||||
|
ContentType(v interface{}) string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decoder decodes a byte sequence
|
||||||
|
type Decoder interface {
|
||||||
|
Decode(v interface{}) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encoder encodes gRPC payloads / fields into byte sequence.
|
||||||
|
type Encoder interface {
|
||||||
|
Encode(v interface{}) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecoderFunc adapts an decoder function into Decoder.
|
||||||
|
type DecoderFunc func(v interface{}) error
|
||||||
|
|
||||||
|
// Decode delegates invocations to the underlying function itself.
|
||||||
|
func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
|
||||||
|
|
||||||
|
// EncoderFunc adapts an encoder function into Encoder
|
||||||
|
type EncoderFunc func(v interface{}) error
|
||||||
|
|
||||||
|
// Encode delegates invocations to the underlying function itself.
|
||||||
|
func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
|
||||||
|
|
||||||
|
// Delimited defines the streaming delimiter.
|
||||||
|
type Delimited interface {
|
||||||
|
// Delimiter returns the record separator for the stream.
|
||||||
|
Delimiter() []byte
|
||||||
|
}
|
109
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
generated
vendored
Normal file
109
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
generated
vendored
Normal file
|
@ -0,0 +1,109 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"mime"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/protobuf/encoding/protojson"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MIMEWildcard is the fallback MIME type used for requests which do not match
|
||||||
|
// a registered MIME type.
|
||||||
|
const MIMEWildcard = "*"
|
||||||
|
|
||||||
|
var (
|
||||||
|
acceptHeader = http.CanonicalHeaderKey("Accept")
|
||||||
|
contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
|
||||||
|
|
||||||
|
defaultMarshaler = &HTTPBodyMarshaler{
|
||||||
|
Marshaler: &JSONPb{
|
||||||
|
MarshalOptions: protojson.MarshalOptions{
|
||||||
|
EmitUnpopulated: true,
|
||||||
|
},
|
||||||
|
UnmarshalOptions: protojson.UnmarshalOptions{
|
||||||
|
DiscardUnknown: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// MarshalerForRequest returns the inbound/outbound marshalers for this request.
|
||||||
|
// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
|
||||||
|
// If it isn't set (or the request Content-Type is empty), checks for "*".
|
||||||
|
// If there are multiple Content-Type headers set, choose the first one that it can
|
||||||
|
// exactly match in the registry.
|
||||||
|
// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
|
||||||
|
func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
|
||||||
|
for _, acceptVal := range r.Header[acceptHeader] {
|
||||||
|
if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
|
||||||
|
outbound = m
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, contentTypeVal := range r.Header[contentTypeHeader] {
|
||||||
|
contentType, _, err := mime.ParseMediaType(contentTypeVal)
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if m, ok := mux.marshalers.mimeMap[contentType]; ok {
|
||||||
|
inbound = m
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if inbound == nil {
|
||||||
|
inbound = mux.marshalers.mimeMap[MIMEWildcard]
|
||||||
|
}
|
||||||
|
if outbound == nil {
|
||||||
|
outbound = inbound
|
||||||
|
}
|
||||||
|
|
||||||
|
return inbound, outbound
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalerRegistry is a mapping from MIME types to Marshalers.
|
||||||
|
type marshalerRegistry struct {
|
||||||
|
mimeMap map[string]Marshaler
|
||||||
|
}
|
||||||
|
|
||||||
|
// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
|
||||||
|
// MIME type).
|
||||||
|
func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
|
||||||
|
if len(mime) == 0 {
|
||||||
|
return errors.New("empty MIME type")
|
||||||
|
}
|
||||||
|
|
||||||
|
m.mimeMap[mime] = marshaler
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeMarshalerMIMERegistry returns a new registry of marshalers.
|
||||||
|
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
|
||||||
|
//
|
||||||
|
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
|
||||||
|
// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
|
||||||
|
// with a "application/json" Content-Type.
|
||||||
|
// "*" can be used to match any Content-Type.
|
||||||
|
// This can be attached to a ServerMux with the marshaler option.
|
||||||
|
func makeMarshalerMIMERegistry() marshalerRegistry {
|
||||||
|
return marshalerRegistry{
|
||||||
|
mimeMap: map[string]Marshaler{
|
||||||
|
MIMEWildcard: defaultMarshaler,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
|
||||||
|
// Marshalers to a MIME type in mux.
|
||||||
|
func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
|
||||||
|
return func(mux *ServeMux) {
|
||||||
|
if err := mux.marshalers.add(mime, marshaler); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,466 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/textproto"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/health/grpc_health_v1"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
|
||||||
|
type UnescapingMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// UnescapingModeLegacy is the default V2 behavior, which escapes the entire
|
||||||
|
// path string before doing any routing.
|
||||||
|
UnescapingModeLegacy UnescapingMode = iota
|
||||||
|
|
||||||
|
// UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
|
||||||
|
// reserved characters.
|
||||||
|
UnescapingModeAllExceptReserved
|
||||||
|
|
||||||
|
// UnescapingModeAllExceptSlash unescapes URL path parameters except path
|
||||||
|
// separators, which will be left as "%2F".
|
||||||
|
UnescapingModeAllExceptSlash
|
||||||
|
|
||||||
|
// UnescapingModeAllCharacters unescapes all URL path parameters.
|
||||||
|
UnescapingModeAllCharacters
|
||||||
|
|
||||||
|
// UnescapingModeDefault is the default escaping type.
|
||||||
|
// TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
|
||||||
|
// reference implementation
|
||||||
|
UnescapingModeDefault = UnescapingModeLegacy
|
||||||
|
)
|
||||||
|
|
||||||
|
var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
|
||||||
|
|
||||||
|
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
|
||||||
|
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
|
||||||
|
|
||||||
|
// ServeMux is a request multiplexer for grpc-gateway.
|
||||||
|
// It matches http requests to patterns and invokes the corresponding handler.
|
||||||
|
type ServeMux struct {
|
||||||
|
// handlers maps HTTP method to a list of handlers.
|
||||||
|
handlers map[string][]handler
|
||||||
|
forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
|
||||||
|
marshalers marshalerRegistry
|
||||||
|
incomingHeaderMatcher HeaderMatcherFunc
|
||||||
|
outgoingHeaderMatcher HeaderMatcherFunc
|
||||||
|
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
|
||||||
|
errorHandler ErrorHandlerFunc
|
||||||
|
streamErrorHandler StreamErrorHandlerFunc
|
||||||
|
routingErrorHandler RoutingErrorHandlerFunc
|
||||||
|
disablePathLengthFallback bool
|
||||||
|
unescapingMode UnescapingMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeMuxOption is an option that can be given to a ServeMux on construction.
|
||||||
|
type ServeMuxOption func(*ServeMux)
|
||||||
|
|
||||||
|
// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
|
||||||
|
//
|
||||||
|
// forwardResponseOption is an option that will be called on the relevant context.Context,
|
||||||
|
// http.ResponseWriter, and proto.Message before every forwarded response.
|
||||||
|
//
|
||||||
|
// The message may be nil in the case where just a header is being sent.
|
||||||
|
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
|
||||||
|
return func(serveMux *ServeMux) {
|
||||||
|
serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
|
||||||
|
// for more information.
|
||||||
|
func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
|
||||||
|
return func(serveMux *ServeMux) {
|
||||||
|
serveMux.unescapingMode = mode
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
|
||||||
|
// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
|
||||||
|
// done with careful consideration.
|
||||||
|
func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
|
||||||
|
return func(serveMux *ServeMux) {
|
||||||
|
currentQueryParser = queryParameterParser
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
|
||||||
|
type HeaderMatcherFunc func(string) (string, bool)
|
||||||
|
|
||||||
|
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
|
||||||
|
// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function.
|
||||||
|
// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
|
||||||
|
// Other headers are not added to the gRPC metadata.
|
||||||
|
func DefaultHeaderMatcher(key string) (string, bool) {
|
||||||
|
switch key = textproto.CanonicalMIMEHeaderKey(key); {
|
||||||
|
case isPermanentHTTPHeader(key):
|
||||||
|
return MetadataPrefix + key, true
|
||||||
|
case strings.HasPrefix(key, MetadataHeaderPrefix):
|
||||||
|
return key[len(MetadataHeaderPrefix):], true
|
||||||
|
}
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
|
||||||
|
//
|
||||||
|
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
|
||||||
|
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
|
||||||
|
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||||
|
for _, header := range fn.matchedMalformedHeaders() {
|
||||||
|
grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
|
||||||
|
}
|
||||||
|
|
||||||
|
return func(mux *ServeMux) {
|
||||||
|
mux.incomingHeaderMatcher = fn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server.
|
||||||
|
func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
|
||||||
|
if fn == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
headers := make([]string, 0)
|
||||||
|
for header := range malformedHTTPHeaders {
|
||||||
|
out, accept := fn(header)
|
||||||
|
if accept && isMalformedHTTPHeader(out) {
|
||||||
|
headers = append(headers, out)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
|
||||||
|
//
|
||||||
|
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
|
||||||
|
// passed to http response returned from gateway. To transform the header before passing to response,
|
||||||
|
// matcher should return modified header.
|
||||||
|
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||||
|
return func(mux *ServeMux) {
|
||||||
|
mux.outgoingHeaderMatcher = fn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
|
||||||
|
//
|
||||||
|
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
|
||||||
|
// is reading token from cookie and adding it in gRPC context.
|
||||||
|
func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
|
||||||
|
return func(serveMux *ServeMux) {
|
||||||
|
serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
|
||||||
|
//
|
||||||
|
// This can be used to configure a custom error response.
|
||||||
|
func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
|
||||||
|
return func(serveMux *ServeMux) {
|
||||||
|
serveMux.errorHandler = fn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
|
||||||
|
// error handler, which allows for customizing the error trailer for server-streaming
|
||||||
|
// calls.
|
||||||
|
//
|
||||||
|
// For stream errors that occur before any response has been written, the mux's
|
||||||
|
// ErrorHandler will be invoked. However, once data has been written, the errors must
|
||||||
|
// be handled differently: they must be included in the response body. The response body's
|
||||||
|
// final message will include the error details returned by the stream error handler.
|
||||||
|
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
|
||||||
|
return func(serveMux *ServeMux) {
|
||||||
|
serveMux.streamErrorHandler = fn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
|
||||||
|
//
|
||||||
|
// Method called for errors which can happen before gRPC route selected or executed.
|
||||||
|
// The following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest
|
||||||
|
func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
|
||||||
|
return func(serveMux *ServeMux) {
|
||||||
|
serveMux.routingErrorHandler = fn
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback.
|
||||||
|
func WithDisablePathLengthFallback() ServeMuxOption {
|
||||||
|
return func(serveMux *ServeMux) {
|
||||||
|
serveMux.disablePathLengthFallback = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
|
||||||
|
// When called the handler will forward the request to the upstream grpc service health check (defined in the
|
||||||
|
// gRPC Health Checking Protocol).
|
||||||
|
//
|
||||||
|
// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
|
||||||
|
// to setup the protocol in the grpc server.
|
||||||
|
//
|
||||||
|
// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest.
|
||||||
|
func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
|
||||||
|
return func(s *ServeMux) {
|
||||||
|
// error can be ignored since pattern is definitely valid
|
||||||
|
_ = s.HandlePath(
|
||||||
|
http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
|
||||||
|
) {
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
|
||||||
|
resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
|
||||||
|
Service: r.URL.Query().Get("service"),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
|
||||||
|
switch resp.GetStatus() {
|
||||||
|
case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
|
||||||
|
err = status.Error(codes.Unavailable, resp.String())
|
||||||
|
case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
|
||||||
|
err = status.Error(codes.NotFound, resp.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = outboundMarshaler.NewEncoder(w).Encode(resp)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
|
||||||
|
//
|
||||||
|
// See WithHealthEndpointAt for the general implementation.
|
||||||
|
func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
|
||||||
|
return WithHealthEndpointAt(healthCheckClient, "/healthz")
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServeMux returns a new ServeMux whose internal mapping is empty.
|
||||||
|
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
|
||||||
|
serveMux := &ServeMux{
|
||||||
|
handlers: make(map[string][]handler),
|
||||||
|
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
|
||||||
|
marshalers: makeMarshalerMIMERegistry(),
|
||||||
|
errorHandler: DefaultHTTPErrorHandler,
|
||||||
|
streamErrorHandler: DefaultStreamErrorHandler,
|
||||||
|
routingErrorHandler: DefaultRoutingErrorHandler,
|
||||||
|
unescapingMode: UnescapingModeDefault,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(serveMux)
|
||||||
|
}
|
||||||
|
|
||||||
|
if serveMux.incomingHeaderMatcher == nil {
|
||||||
|
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
|
||||||
|
}
|
||||||
|
|
||||||
|
if serveMux.outgoingHeaderMatcher == nil {
|
||||||
|
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
|
||||||
|
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return serveMux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle associates "h" to the pair of HTTP method and path pattern.
|
||||||
|
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
|
||||||
|
s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlePath allows users to configure custom path handlers.
|
||||||
|
// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
|
||||||
|
func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
|
||||||
|
compiler, err := httprule.Parse(pathPattern)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("parsing path pattern: %w", err)
|
||||||
|
}
|
||||||
|
tp := compiler.Compile()
|
||||||
|
pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("creating new pattern: %w", err)
|
||||||
|
}
|
||||||
|
s.Handle(meth, pattern, h)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
|
||||||
|
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
|
path := r.URL.Path
|
||||||
|
if !strings.HasPrefix(path, "/") {
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(v3): remove UnescapingModeLegacy
|
||||||
|
if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
|
||||||
|
path = r.URL.RawPath
|
||||||
|
}
|
||||||
|
|
||||||
|
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
|
||||||
|
r.Method = strings.ToUpper(override)
|
||||||
|
if err := r.ParseForm(); err != nil {
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
sterr := status.Error(codes.InvalidArgument, err.Error())
|
||||||
|
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var pathComponents []string
|
||||||
|
// since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F"
|
||||||
|
// in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the
|
||||||
|
// path is the RawPath (i.e. unescaped). That does mean that the behavior of this function will change its default
|
||||||
|
// behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved
|
||||||
|
if s.unescapingMode == UnescapingModeAllCharacters {
|
||||||
|
pathComponents = encodedPathSplitter.Split(path[1:], -1)
|
||||||
|
} else {
|
||||||
|
pathComponents = strings.Split(path[1:], "/")
|
||||||
|
}
|
||||||
|
|
||||||
|
lastPathComponent := pathComponents[len(pathComponents)-1]
|
||||||
|
|
||||||
|
for _, h := range s.handlers[r.Method] {
|
||||||
|
// If the pattern has a verb, explicitly look for a suffix in the last
|
||||||
|
// component that matches a colon plus the verb. This allows us to
|
||||||
|
// handle some cases that otherwise can't be correctly handled by the
|
||||||
|
// former LastIndex case, such as when the verb literal itself contains
|
||||||
|
// a colon. This should work for all cases that have run through the
|
||||||
|
// parser because we know what verb we're looking for, however, there
|
||||||
|
// are still some cases that the parser itself cannot disambiguate. See
|
||||||
|
// the comment there if interested.
|
||||||
|
|
||||||
|
var verb string
|
||||||
|
patVerb := h.pat.Verb()
|
||||||
|
|
||||||
|
idx := -1
|
||||||
|
if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
|
||||||
|
idx = len(lastPathComponent) - len(patVerb) - 1
|
||||||
|
}
|
||||||
|
if idx == 0 {
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
comps := make([]string, len(pathComponents))
|
||||||
|
copy(comps, pathComponents)
|
||||||
|
|
||||||
|
if idx > 0 {
|
||||||
|
comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
|
||||||
|
if err != nil {
|
||||||
|
var mse MalformedSequenceError
|
||||||
|
if ok := errors.As(err, &mse); ok {
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
|
||||||
|
HTTPStatus: http.StatusBadRequest,
|
||||||
|
Err: mse,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
h.h(w, r, pathParams)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// if no handler has found for the request, lookup for other methods
|
||||||
|
// to handle POST -> GET fallback if the request is subject to path
|
||||||
|
// length fallback.
|
||||||
|
// Note we are not eagerly checking the request here as we want to return the
|
||||||
|
// right HTTP status code, and we need to process the fallback candidates in
|
||||||
|
// order to do that.
|
||||||
|
for m, handlers := range s.handlers {
|
||||||
|
if m == r.Method {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, h := range handlers {
|
||||||
|
var verb string
|
||||||
|
patVerb := h.pat.Verb()
|
||||||
|
|
||||||
|
idx := -1
|
||||||
|
if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
|
||||||
|
idx = len(lastPathComponent) - len(patVerb) - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
comps := make([]string, len(pathComponents))
|
||||||
|
copy(comps, pathComponents)
|
||||||
|
|
||||||
|
if idx > 0 {
|
||||||
|
comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
|
||||||
|
if err != nil {
|
||||||
|
var mse MalformedSequenceError
|
||||||
|
if ok := errors.As(err, &mse); ok {
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
|
||||||
|
HTTPStatus: http.StatusBadRequest,
|
||||||
|
Err: mse,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
|
||||||
|
// Also, only consider POST -> GET fallbacks, and avoid falling back to
|
||||||
|
// potentially dangerous operations like DELETE.
|
||||||
|
if s.isPathLengthFallback(r) && m == http.MethodGet {
|
||||||
|
if err := r.ParseForm(); err != nil {
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
sterr := status.Error(codes.InvalidArgument, err.Error())
|
||||||
|
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.h(w, r, pathParams)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
|
||||||
|
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
|
||||||
|
return s.forwardResponseOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
|
||||||
|
return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
|
||||||
|
}
|
||||||
|
|
||||||
|
type handler struct {
|
||||||
|
pat Pattern
|
||||||
|
h HandlerFunc
|
||||||
|
}
|
381
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
generated
vendored
Normal file
381
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
generated
vendored
Normal file
|
@ -0,0 +1,381 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
|
||||||
|
ErrNotMatch = errors.New("not match to the path pattern")
|
||||||
|
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
|
||||||
|
ErrInvalidPattern = errors.New("invalid pattern")
|
||||||
|
)
|
||||||
|
|
||||||
|
type MalformedSequenceError string
|
||||||
|
|
||||||
|
func (e MalformedSequenceError) Error() string {
|
||||||
|
return "malformed path escape " + strconv.Quote(string(e))
|
||||||
|
}
|
||||||
|
|
||||||
|
type op struct {
|
||||||
|
code utilities.OpCode
|
||||||
|
operand int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pattern is a template pattern of http request paths defined in
|
||||||
|
// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
|
||||||
|
type Pattern struct {
|
||||||
|
// ops is a list of operations
|
||||||
|
ops []op
|
||||||
|
// pool is a constant pool indexed by the operands or vars.
|
||||||
|
pool []string
|
||||||
|
// vars is a list of variables names to be bound by this pattern
|
||||||
|
vars []string
|
||||||
|
// stacksize is the max depth of the stack
|
||||||
|
stacksize int
|
||||||
|
// tailLen is the length of the fixed-size segments after a deep wildcard
|
||||||
|
tailLen int
|
||||||
|
// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
|
||||||
|
verb string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPattern returns a new Pattern from the given definition values.
|
||||||
|
// "ops" is a sequence of op codes. "pool" is a constant pool.
|
||||||
|
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
|
||||||
|
// "version" must be 1 for now.
|
||||||
|
// It returns an error if the given definition is invalid.
|
||||||
|
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
|
||||||
|
if version != 1 {
|
||||||
|
grpclog.Infof("unsupported version: %d", version)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
l := len(ops)
|
||||||
|
if l%2 != 0 {
|
||||||
|
grpclog.Infof("odd number of ops codes: %d", l)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
typedOps []op
|
||||||
|
stack, maxstack int
|
||||||
|
tailLen int
|
||||||
|
pushMSeen bool
|
||||||
|
vars []string
|
||||||
|
)
|
||||||
|
for i := 0; i < l; i += 2 {
|
||||||
|
op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
|
||||||
|
switch op.code {
|
||||||
|
case utilities.OpNop:
|
||||||
|
continue
|
||||||
|
case utilities.OpPush:
|
||||||
|
if pushMSeen {
|
||||||
|
tailLen++
|
||||||
|
}
|
||||||
|
stack++
|
||||||
|
case utilities.OpPushM:
|
||||||
|
if pushMSeen {
|
||||||
|
grpclog.Infof("pushM appears twice")
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
pushMSeen = true
|
||||||
|
stack++
|
||||||
|
case utilities.OpLitPush:
|
||||||
|
if op.operand < 0 || len(pool) <= op.operand {
|
||||||
|
grpclog.Infof("negative literal index: %d", op.operand)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
if pushMSeen {
|
||||||
|
tailLen++
|
||||||
|
}
|
||||||
|
stack++
|
||||||
|
case utilities.OpConcatN:
|
||||||
|
if op.operand <= 0 {
|
||||||
|
grpclog.Infof("negative concat size: %d", op.operand)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
stack -= op.operand
|
||||||
|
if stack < 0 {
|
||||||
|
grpclog.Info("stack underflow")
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
stack++
|
||||||
|
case utilities.OpCapture:
|
||||||
|
if op.operand < 0 || len(pool) <= op.operand {
|
||||||
|
grpclog.Infof("variable name index out of bound: %d", op.operand)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
v := pool[op.operand]
|
||||||
|
op.operand = len(vars)
|
||||||
|
vars = append(vars, v)
|
||||||
|
stack--
|
||||||
|
if stack < 0 {
|
||||||
|
grpclog.Infof("stack underflow")
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
grpclog.Infof("invalid opcode: %d", op.code)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
if maxstack < stack {
|
||||||
|
maxstack = stack
|
||||||
|
}
|
||||||
|
typedOps = append(typedOps, op)
|
||||||
|
}
|
||||||
|
return Pattern{
|
||||||
|
ops: typedOps,
|
||||||
|
pool: pool,
|
||||||
|
vars: vars,
|
||||||
|
stacksize: maxstack,
|
||||||
|
tailLen: tailLen,
|
||||||
|
verb: verb,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
|
||||||
|
func MustPattern(p Pattern, err error) Pattern {
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Fatalf("Pattern initialization failed: %v", err)
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchAndEscape examines components to determine if they match to a Pattern.
|
||||||
|
// MatchAndEscape will return an error if no Patterns matched or if a pattern
|
||||||
|
// matched but contained malformed escape sequences. If successful, the function
|
||||||
|
// returns a mapping from field paths to their captured values.
|
||||||
|
func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
|
||||||
|
if p.verb != verb {
|
||||||
|
if p.verb != "" {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
if len(components) == 0 {
|
||||||
|
components = []string{":" + verb}
|
||||||
|
} else {
|
||||||
|
components = append([]string{}, components...)
|
||||||
|
components[len(components)-1] += ":" + verb
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var pos int
|
||||||
|
stack := make([]string, 0, p.stacksize)
|
||||||
|
captured := make([]string, len(p.vars))
|
||||||
|
l := len(components)
|
||||||
|
for _, op := range p.ops {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
switch op.code {
|
||||||
|
case utilities.OpNop:
|
||||||
|
continue
|
||||||
|
case utilities.OpPush, utilities.OpLitPush:
|
||||||
|
if pos >= l {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
c := components[pos]
|
||||||
|
if op.code == utilities.OpLitPush {
|
||||||
|
if lit := p.pool[op.operand]; c != lit {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
} else if op.code == utilities.OpPush {
|
||||||
|
if c, err = unescape(c, unescapingMode, false); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stack = append(stack, c)
|
||||||
|
pos++
|
||||||
|
case utilities.OpPushM:
|
||||||
|
end := len(components)
|
||||||
|
if end < pos+p.tailLen {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
end -= p.tailLen
|
||||||
|
c := strings.Join(components[pos:end], "/")
|
||||||
|
if c, err = unescape(c, unescapingMode, true); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
stack = append(stack, c)
|
||||||
|
pos = end
|
||||||
|
case utilities.OpConcatN:
|
||||||
|
n := op.operand
|
||||||
|
l := len(stack) - n
|
||||||
|
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||||
|
case utilities.OpCapture:
|
||||||
|
n := len(stack) - 1
|
||||||
|
captured[op.operand] = stack[n]
|
||||||
|
stack = stack[:n]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if pos < l {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
bindings := make(map[string]string)
|
||||||
|
for i, val := range captured {
|
||||||
|
bindings[p.vars[i]] = val
|
||||||
|
}
|
||||||
|
return bindings, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchAndEscape examines components to determine if they match to a Pattern.
|
||||||
|
// It will never perform per-component unescaping (see: UnescapingModeLegacy).
|
||||||
|
// MatchAndEscape will return an error if no Patterns matched. If successful,
|
||||||
|
// the function returns a mapping from field paths to their captured values.
|
||||||
|
//
|
||||||
|
// Deprecated: Use MatchAndEscape.
|
||||||
|
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
|
||||||
|
return p.MatchAndEscape(components, verb, UnescapingModeDefault)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verb returns the verb part of the Pattern.
|
||||||
|
func (p Pattern) Verb() string { return p.verb }
|
||||||
|
|
||||||
|
func (p Pattern) String() string {
|
||||||
|
var stack []string
|
||||||
|
for _, op := range p.ops {
|
||||||
|
switch op.code {
|
||||||
|
case utilities.OpNop:
|
||||||
|
continue
|
||||||
|
case utilities.OpPush:
|
||||||
|
stack = append(stack, "*")
|
||||||
|
case utilities.OpLitPush:
|
||||||
|
stack = append(stack, p.pool[op.operand])
|
||||||
|
case utilities.OpPushM:
|
||||||
|
stack = append(stack, "**")
|
||||||
|
case utilities.OpConcatN:
|
||||||
|
n := op.operand
|
||||||
|
l := len(stack) - n
|
||||||
|
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||||
|
case utilities.OpCapture:
|
||||||
|
n := len(stack) - 1
|
||||||
|
stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
segs := strings.Join(stack, "/")
|
||||||
|
if p.verb != "" {
|
||||||
|
return fmt.Sprintf("/%s:%s", segs, p.verb)
|
||||||
|
}
|
||||||
|
return "/" + segs
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The following code is adopted and modified from Go's standard library
|
||||||
|
* and carries the attached license.
|
||||||
|
*
|
||||||
|
* Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
* Use of this source code is governed by a BSD-style
|
||||||
|
* license that can be found in the LICENSE file.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// ishex returns whether or not the given byte is a valid hex character
|
||||||
|
func ishex(c byte) bool {
|
||||||
|
switch {
|
||||||
|
case '0' <= c && c <= '9':
|
||||||
|
return true
|
||||||
|
case 'a' <= c && c <= 'f':
|
||||||
|
return true
|
||||||
|
case 'A' <= c && c <= 'F':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRFC6570Reserved(c byte) bool {
|
||||||
|
switch c {
|
||||||
|
case '!', '#', '$', '&', '\'', '(', ')', '*',
|
||||||
|
'+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unhex converts a hex point to the bit representation
|
||||||
|
func unhex(c byte) byte {
|
||||||
|
switch {
|
||||||
|
case '0' <= c && c <= '9':
|
||||||
|
return c - '0'
|
||||||
|
case 'a' <= c && c <= 'f':
|
||||||
|
return c - 'a' + 10
|
||||||
|
case 'A' <= c && c <= 'F':
|
||||||
|
return c - 'A' + 10
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// shouldUnescapeWithMode returns true if the character is escapable with the
|
||||||
|
// given mode
|
||||||
|
func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
|
||||||
|
switch mode {
|
||||||
|
case UnescapingModeAllExceptReserved:
|
||||||
|
if isRFC6570Reserved(c) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case UnescapingModeAllExceptSlash:
|
||||||
|
if c == '/' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case UnescapingModeAllCharacters:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// unescape unescapes a path string using the provided mode
|
||||||
|
func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
|
||||||
|
// TODO(v3): remove UnescapingModeLegacy
|
||||||
|
if mode == UnescapingModeLegacy {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !multisegment {
|
||||||
|
mode = UnescapingModeAllCharacters
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count %, check that they're well-formed.
|
||||||
|
n := 0
|
||||||
|
for i := 0; i < len(s); {
|
||||||
|
if s[i] == '%' {
|
||||||
|
n++
|
||||||
|
if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
|
||||||
|
s = s[i:]
|
||||||
|
if len(s) > 3 {
|
||||||
|
s = s[:3]
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", MalformedSequenceError(s)
|
||||||
|
}
|
||||||
|
i += 3
|
||||||
|
} else {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if n == 0 {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var t strings.Builder
|
||||||
|
t.Grow(len(s))
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
switch s[i] {
|
||||||
|
case '%':
|
||||||
|
c := unhex(s[i+1])<<4 | unhex(s[i+2])
|
||||||
|
if shouldUnescapeWithMode(c, mode) {
|
||||||
|
t.WriteByte(c)
|
||||||
|
i += 2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
t.WriteByte(s[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.String(), nil
|
||||||
|
}
|
80
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
generated
vendored
Normal file
80
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
generated
vendored
Normal file
|
@ -0,0 +1,80 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StringP returns a pointer to a string whose pointee is same as the given string value.
|
||||||
|
func StringP(val string) (*string, error) {
|
||||||
|
return proto.String(val), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolP parses the given string representation of a boolean value,
|
||||||
|
// and returns a pointer to a bool whose value is same as the parsed value.
|
||||||
|
func BoolP(val string) (*bool, error) {
|
||||||
|
b, err := Bool(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Bool(b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64P parses the given string representation of a floating point number,
|
||||||
|
// and returns a pointer to a float64 whose value is same as the parsed number.
|
||||||
|
func Float64P(val string) (*float64, error) {
|
||||||
|
f, err := Float64(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Float64(f), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float32P parses the given string representation of a floating point number,
|
||||||
|
// and returns a pointer to a float32 whose value is same as the parsed number.
|
||||||
|
func Float32P(val string) (*float32, error) {
|
||||||
|
f, err := Float32(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Float32(f), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64P parses the given string representation of an integer
|
||||||
|
// and returns a pointer to a int64 whose value is same as the parsed integer.
|
||||||
|
func Int64P(val string) (*int64, error) {
|
||||||
|
i, err := Int64(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Int64(i), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int32P parses the given string representation of an integer
|
||||||
|
// and returns a pointer to a int32 whose value is same as the parsed integer.
|
||||||
|
func Int32P(val string) (*int32, error) {
|
||||||
|
i, err := Int32(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Int32(i), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uint64P parses the given string representation of an integer
|
||||||
|
// and returns a pointer to a uint64 whose value is same as the parsed integer.
|
||||||
|
func Uint64P(val string) (*uint64, error) {
|
||||||
|
i, err := Uint64(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Uint64(i), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uint32P parses the given string representation of an integer
|
||||||
|
// and returns a pointer to a uint32 whose value is same as the parsed integer.
|
||||||
|
func Uint32P(val string) (*uint32, error) {
|
||||||
|
i, err := Uint32(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Uint32(i), err
|
||||||
|
}
|
338
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
generated
vendored
Normal file
338
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
generated
vendored
Normal file
|
@ -0,0 +1,338 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/protobuf/encoding/protojson"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
|
"google.golang.org/protobuf/types/known/durationpb"
|
||||||
|
field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
|
||||||
|
"google.golang.org/protobuf/types/known/structpb"
|
||||||
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
"google.golang.org/protobuf/types/known/wrapperspb"
|
||||||
|
)
|
||||||
|
|
||||||
|
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
|
||||||
|
|
||||||
|
var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
|
||||||
|
|
||||||
|
// QueryParameterParser defines interface for all query parameter parsers
|
||||||
|
type QueryParameterParser interface {
|
||||||
|
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// PopulateQueryParameters parses query parameters
|
||||||
|
// into "msg" using current query parser
|
||||||
|
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||||
|
return currentQueryParser.Parse(msg, values, filter)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultQueryParser is a QueryParameterParser which implements the default
|
||||||
|
// query parameters parsing behavior.
|
||||||
|
//
|
||||||
|
// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
|
||||||
|
type DefaultQueryParser struct{}
|
||||||
|
|
||||||
|
// Parse populates "values" into "msg".
|
||||||
|
// A value is ignored if its key starts with one of the elements in "filter".
|
||||||
|
func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||||
|
for key, values := range values {
|
||||||
|
if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
|
||||||
|
key = match[1]
|
||||||
|
values = append([]string{match[2]}, values...)
|
||||||
|
}
|
||||||
|
fieldPath := strings.Split(key, ".")
|
||||||
|
if filter.HasCommonPrefix(fieldPath) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
|
||||||
|
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
|
||||||
|
fieldPath := strings.Split(fieldPathString, ".")
|
||||||
|
return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
|
||||||
|
}
|
||||||
|
|
||||||
|
// populateFieldValueFromPath resolves fieldPath against msgValue, walking
// through singular nested message fields, and assigns values to the final
// field. Unknown field names are logged and ignored (not an error) because
// they may simply be unrelated query parameters.
func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
	if len(fieldPath) < 1 {
		return errors.New("no field path")
	}
	if len(values) < 1 {
		return errors.New("no value provided")
	}

	var fieldDescriptor protoreflect.FieldDescriptor
	for i, fieldName := range fieldPath {
		fields := msgValue.Descriptor().Fields()

		// Get field by name, falling back to the JSON name.
		fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
		if fieldDescriptor == nil {
			fieldDescriptor = fields.ByJSONName(fieldName)
			if fieldDescriptor == nil {
				// We're not returning an error here because this could just be
				// an extra query parameter that isn't part of the request.
				grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
				return nil
			}
		}

		// If this is the last element, we're done
		if i == len(fieldPath)-1 {
			break
		}

		// Only singular message fields are allowed as intermediate path steps.
		if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
			return fmt.Errorf("invalid path: %q is not a message", fieldName)
		}

		// Get the nested message
		msgValue = msgValue.Mutable(fieldDescriptor).Message()
	}

	// Check if oneof already set; assigning twice within one oneof is an error.
	if of := fieldDescriptor.ContainingOneof(); of != nil {
		if f := msgValue.WhichOneof(of); f != nil {
			return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
		}
	}

	switch {
	case fieldDescriptor.IsList():
		return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
	case fieldDescriptor.IsMap():
		return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
	}

	if len(values) > 1 {
		return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
	}

	return populateField(fieldDescriptor, msgValue, values[0])
}
func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
|
||||||
|
v, err := parseField(fieldDescriptor, value)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
msgValue.Set(fieldDescriptor, v)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
|
||||||
|
for _, value := range values {
|
||||||
|
v, err := parseField(fieldDescriptor, value)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
|
||||||
|
}
|
||||||
|
list.Append(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
|
||||||
|
if len(values) != 2 {
|
||||||
|
return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName())
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := parseField(fieldDescriptor.MapKey(), values[0])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := parseField(fieldDescriptor.MapValue(), values[1])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mp.Set(key.MapKey(), value)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseField converts the string form of a single query parameter into a
// protoreflect.Value matching fieldDescriptor's kind. Enum values are
// accepted by name first, then by number. It panics on an unknown kind,
// which indicates a protoreflect invariant violation rather than bad input.
func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
	switch fieldDescriptor.Kind() {
	case protoreflect.BoolKind:
		v, err := strconv.ParseBool(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfBool(v), nil
	case protoreflect.EnumKind:
		enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
		if err != nil {
			if errors.Is(err, protoregistry.NotFound) {
				return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
			}
			return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
		}
		// Look for enum by name
		v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
		if v == nil {
			i, err := strconv.Atoi(value)
			if err != nil {
				return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
			}
			// Look for enum by number
			if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil {
				return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
			}
		}
		return protoreflect.ValueOfEnum(v.Number()), nil
	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		v, err := strconv.ParseInt(value, 10, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfInt32(int32(v)), nil
	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		v, err := strconv.ParseInt(value, 10, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfInt64(v), nil
	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		v, err := strconv.ParseUint(value, 10, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfUint32(uint32(v)), nil
	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		v, err := strconv.ParseUint(value, 10, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfUint64(v), nil
	case protoreflect.FloatKind:
		v, err := strconv.ParseFloat(value, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfFloat32(float32(v)), nil
	case protoreflect.DoubleKind:
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfFloat64(v), nil
	case protoreflect.StringKind:
		return protoreflect.ValueOfString(value), nil
	case protoreflect.BytesKind:
		v, err := Bytes(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfBytes(v), nil
	case protoreflect.MessageKind, protoreflect.GroupKind:
		return parseMessage(fieldDescriptor.Message(), value)
	default:
		panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
	}
}
// parseMessage converts a query-parameter string into a protoreflect.Value
// for the well-known message types that have a canonical textual form
// (Timestamp, Duration, the numeric/bool/string/bytes wrappers, FieldMask,
// Value and Struct). Any other message type yields an error.
func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
	var msg proto.Message
	switch msgDescriptor.FullName() {
	case "google.protobuf.Timestamp":
		// RFC 3339 with optional fractional seconds.
		t, err := time.Parse(time.RFC3339Nano, value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = timestamppb.New(t)
	case "google.protobuf.Duration":
		d, err := time.ParseDuration(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = durationpb.New(d)
	case "google.protobuf.DoubleValue":
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = wrapperspb.Double(v)
	case "google.protobuf.FloatValue":
		v, err := strconv.ParseFloat(value, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = wrapperspb.Float(float32(v))
	case "google.protobuf.Int64Value":
		v, err := strconv.ParseInt(value, 10, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = wrapperspb.Int64(v)
	case "google.protobuf.Int32Value":
		v, err := strconv.ParseInt(value, 10, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = wrapperspb.Int32(int32(v))
	case "google.protobuf.UInt64Value":
		v, err := strconv.ParseUint(value, 10, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = wrapperspb.UInt64(v)
	case "google.protobuf.UInt32Value":
		v, err := strconv.ParseUint(value, 10, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = wrapperspb.UInt32(uint32(v))
	case "google.protobuf.BoolValue":
		v, err := strconv.ParseBool(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = wrapperspb.Bool(v)
	case "google.protobuf.StringValue":
		msg = wrapperspb.String(value)
	case "google.protobuf.BytesValue":
		v, err := Bytes(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = wrapperspb.Bytes(v)
	case "google.protobuf.FieldMask":
		// Comma-separated list of paths.
		fm := &field_mask.FieldMask{}
		fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
		msg = fm
	case "google.protobuf.Value":
		var v structpb.Value
		if err := protojson.Unmarshal([]byte(value), &v); err != nil {
			return protoreflect.Value{}, err
		}
		msg = &v
	case "google.protobuf.Struct":
		var v structpb.Struct
		if err := protojson.Unmarshal([]byte(value), &v); err != nil {
			return protoreflect.Value{}, err
		}
		msg = &v
	default:
		return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
	}

	return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
}
31
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
generated
vendored
Normal file
31
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
# Bazel build definitions for the grpc-gateway "utilities" Go package.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

go_library(
    name = "utilities",
    srcs = [
        "doc.go",
        "pattern.go",
        "readerfactory.go",
        "string_array_flag.go",
        "trie.go",
    ],
    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
)

go_test(
    name = "utilities_test",
    size = "small",
    srcs = [
        "string_array_flag_test.go",
        "trie_test.go",
    ],
    deps = [":utilities"],
)

# Alias preserving the historical "go_default_library" target name.
alias(
    name = "go_default_library",
    actual = ":utilities",
    visibility = ["//visibility:public"],
)
@ -0,0 +1,2 @@
|
||||||
|
// Package utilities provides members for internal use in grpc-gateway.
|
||||||
|
package utilities
|
22
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
generated
vendored
Normal file
22
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
package utilities
|
||||||
|
|
||||||
|
// An OpCode is an opcode of compiled path patterns.
type OpCode int

// These constants are the valid values of OpCode. Their numeric values are
// assigned by iota and are part of the wire-compatible pattern encoding, so
// the declaration order must not change.
const (
	// OpNop does nothing
	OpNop = OpCode(iota)
	// OpPush pushes a component to stack
	OpPush
	// OpLitPush pushes a component to stack if it matches to the literal
	OpLitPush
	// OpPushM concatenates the remaining components and pushes it to stack
	OpPushM
	// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
	OpConcatN
	// OpCapture pops an item and binds it to the variable
	OpCapture
	// OpEnd is the least positive invalid opcode.
	OpEnd
)
19
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
generated
vendored
Normal file
19
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
||||||
|
package utilities
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
|
||||||
|
// at the start of the stream
|
||||||
|
func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
|
||||||
|
b, err := io.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return func() io.Reader {
|
||||||
|
return bytes.NewReader(b)
|
||||||
|
}, nil
|
||||||
|
}
|
33
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
generated
vendored
Normal file
33
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
package utilities
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// flagInterface is an cut down interface to `flag`
|
||||||
|
type flagInterface interface {
|
||||||
|
Var(value flag.Value, name string, usage string)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringArrayFlag defines a flag with the specified name and usage string.
|
||||||
|
// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
|
||||||
|
func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
|
||||||
|
value := &StringArrayFlags{}
|
||||||
|
f.Var(value, name, usage)
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringArrayFlags is a wrapper of `[]string` to provider an interface for `flag.Var`
|
||||||
|
type StringArrayFlags []string
|
||||||
|
|
||||||
|
// String returns a string representation of `StringArrayFlags`
|
||||||
|
func (i *StringArrayFlags) String() string {
|
||||||
|
return strings.Join(*i, ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set appends a value to `StringArrayFlags`
|
||||||
|
func (i *StringArrayFlags) Set(value string) error {
|
||||||
|
*i = append(*i, value)
|
||||||
|
return nil
|
||||||
|
}
|
174
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
generated
vendored
Normal file
174
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
generated
vendored
Normal file
|
@ -0,0 +1,174 @@
|
||||||
|
package utilities
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DoubleArray is a Double Array implementation of trie on sequences of strings.
type DoubleArray struct {
	// Encoding keeps an encoding from string to int
	Encoding map[string]int
	// Base is the base array of Double Array
	Base []int
	// Check is the check array of Double Array. Check[j] holds the owning
	// state's position + 1; 0 means the slot is unused (see addSeqs).
	Check []int
}
// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
func NewDoubleArray(seqs [][]string) *DoubleArray {
	da := &DoubleArray{Encoding: make(map[string]int)}
	if len(seqs) == 0 {
		return da
	}

	// Encode tokens to ints and sort lexicographically so that sequences
	// sharing a prefix are adjacent (required by node.children).
	encoded := registerTokens(da, seqs)
	sort.Sort(byLex(encoded))

	root := node{row: -1, col: -1, left: 0, right: len(encoded)}
	addSeqs(da, encoded, 0, root)

	// Trim the unused tail of Base/Check: keep everything up to and
	// including the last occupied Check slot.
	for i := len(da.Base); i > 0; i-- {
		if da.Check[i-1] != 0 {
			da.Base = da.Base[:i]
			da.Check = da.Check[:i]
			break
		}
	}
	return da
}
func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
|
||||||
|
var result [][]int
|
||||||
|
for _, seq := range seqs {
|
||||||
|
encoded := make([]int, 0, len(seq))
|
||||||
|
for _, token := range seq {
|
||||||
|
if _, ok := da.Encoding[token]; !ok {
|
||||||
|
da.Encoding[token] = len(da.Encoding)
|
||||||
|
}
|
||||||
|
encoded = append(encoded, da.Encoding[token])
|
||||||
|
}
|
||||||
|
result = append(result, encoded)
|
||||||
|
}
|
||||||
|
for i := range result {
|
||||||
|
result[i] = append(result[i], len(da.Encoding))
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// node identifies a group of encoded sequences (rows left..right-1 of seqs)
// that share a common prefix through column col. row/col index a
// representative element; the root uses row = col = -1.
type node struct {
	row, col int
	left, right int
}

// value returns the code at this node's position in seqs.
func (n node) value(seqs [][]int) int {
	return seqs[n.row][n.col]
}

// children partitions this node's row range by the code in the next column,
// producing one child node per contiguous run of rows.
func (n node) children(seqs [][]int) []*node {
	var result []*node
	lastVal := int(-1)
	last := new(node)
	for i := n.left; i < n.right; i++ {
		// NOTE(review): lastVal is never reassigned inside the loop, so this
		// dedup check can only ever match -1 and never fires; rows sharing a
		// code each get their own child. Confirm against upstream intent
		// before relying on one-child-per-code here.
		if lastVal == seqs[i][n.col+1] {
			continue
		}
		// Close the previous child at row i and open a new one starting there.
		last.right = i
		last = &node{
			row:  i,
			col:  n.col + 1,
			left: i,
		}
		result = append(result, last)
	}
	last.right = n.right
	return result
}
// addSeqs recursively lays out the children of n into da.Base/da.Check.
// It finds the smallest base value i such that every child's slot
// (i + child code) is free, records it in da.Base[pos], claims the slots in
// da.Check, and recurses into each non-terminator child.
func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
	ensureSize(da, pos)

	children := n.children(seqs)
	var i int
	for i = 1; ; i++ {
		// A candidate base i is usable only if all child slots are free.
		ok := func() bool {
			for _, child := range children {
				code := child.value(seqs)
				j := i + code
				ensureSize(da, j)
				if da.Check[j] != 0 {
					return false
				}
			}
			return true
		}()
		if ok {
			break
		}
	}
	da.Base[pos] = i
	// Claim every child slot: Check stores parent position + 1 (0 = free).
	for _, child := range children {
		code := child.value(seqs)
		j := i + code
		da.Check[j] = pos + 1
	}
	terminator := len(da.Encoding)
	for _, child := range children {
		code := child.value(seqs)
		if code == terminator {
			// The terminator marks end-of-sequence; nothing to recurse into.
			continue
		}
		j := i + code
		addSeqs(da, seqs, j, *child)
	}
}
func ensureSize(da *DoubleArray, i int) {
|
||||||
|
for i >= len(da.Base) {
|
||||||
|
da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
|
||||||
|
da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// byLex sorts encoded sequences lexicographically; a proper prefix orders
// before any of its extensions.
type byLex [][]int

func (l byLex) Len() int      { return len(l) }
func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l byLex) Less(i, j int) bool {
	a, b := l[i], l[j]
	k := 0
	for ; k < len(a) && k < len(b); k++ {
		switch {
		case a[k] < b[k]:
			return true
		case a[k] > b[k]:
			return false
		}
	}
	// All shared positions equal: a is less iff it ran out before b did.
	return k < len(b)
}
||||||
|
// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
	if len(da.Base) == 0 {
		return false
	}

	// Walk the trie as far as seq's tokens allow, starting at the root (0).
	var i int
	for _, t := range seq {
		code, ok := da.Encoding[t]
		if !ok {
			// Unknown token: no stored sequence continues past this point.
			break
		}
		j := da.Base[i] + code
		if len(da.Check) <= j || da.Check[j] != i+1 {
			// No transition for this code from state i.
			break
		}
		i = j
	}
	// A stored sequence is a prefix of seq iff the reached state has a
	// terminator edge (code == len(da.Encoding)).
	j := da.Base[i] + len(da.Encoding)
	if len(da.Check) <= j || da.Check[j] != i+1 {
		return false
	}
	return true
}
201
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
generated
vendored
Normal file
201
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
211
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
generated
vendored
Normal file
211
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
generated
vendored
Normal file
|
@ -0,0 +1,211 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||||
|
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||||
|
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type client struct {
|
||||||
|
metadata metadata.MD
|
||||||
|
exportTimeout time.Duration
|
||||||
|
requestFunc retry.RequestFunc
|
||||||
|
|
||||||
|
// ourConn keeps track of where conn was created: true if created here in
|
||||||
|
// NewClient, or false if passed with an option. This is important on
|
||||||
|
// Shutdown as the conn should only be closed if we created it. Otherwise,
|
||||||
|
// it is up to the processes that passed the conn to close it.
|
||||||
|
ourConn bool
|
||||||
|
conn *grpc.ClientConn
|
||||||
|
msc colmetricpb.MetricsServiceClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// newClient creates a new gRPC metric client.
|
||||||
|
func newClient(ctx context.Context, cfg oconf.Config) (*client, error) {
|
||||||
|
c := &client{
|
||||||
|
exportTimeout: cfg.Metrics.Timeout,
|
||||||
|
requestFunc: cfg.RetryConfig.RequestFunc(retryable),
|
||||||
|
conn: cfg.GRPCConn,
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cfg.Metrics.Headers) > 0 {
|
||||||
|
c.metadata = metadata.New(cfg.Metrics.Headers)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.conn == nil {
|
||||||
|
// If the caller did not provide a ClientConn when the client was
|
||||||
|
// created, create one using the configuration they did provide.
|
||||||
|
userAgent := "OTel Go OTLP over gRPC metrics exporter/" + Version()
|
||||||
|
dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)}
|
||||||
|
dialOpts = append(dialOpts, cfg.DialOptions...)
|
||||||
|
|
||||||
|
conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, dialOpts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Keep track that we own the lifecycle of this conn and need to close
|
||||||
|
// it on Shutdown.
|
||||||
|
c.ourConn = true
|
||||||
|
c.conn = conn
|
||||||
|
}
|
||||||
|
|
||||||
|
c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
|
||||||
|
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown shuts down the client, freeing all resource.
|
||||||
|
//
|
||||||
|
// Any active connections to a remote endpoint are closed if they were created
|
||||||
|
// by the client. Any gRPC connection passed during creation using
|
||||||
|
// WithGRPCConn will not be closed. It is the caller's responsibility to
|
||||||
|
// handle cleanup of that resource.
|
||||||
|
func (c *client) Shutdown(ctx context.Context) error {
|
||||||
|
// The otlpmetric.Exporter synchronizes access to client methods and
|
||||||
|
// ensures this is called only once. The only thing that needs to be done
|
||||||
|
// here is to release any computational resources the client holds.
|
||||||
|
|
||||||
|
c.metadata = nil
|
||||||
|
c.requestFunc = nil
|
||||||
|
c.msc = nil
|
||||||
|
|
||||||
|
err := ctx.Err()
|
||||||
|
if c.ourConn {
|
||||||
|
closeErr := c.conn.Close()
|
||||||
|
// A context timeout error takes precedence over this error.
|
||||||
|
if err == nil && closeErr != nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.conn = nil
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadMetrics sends protoMetrics to connected endpoint.
|
||||||
|
//
|
||||||
|
// Retryable errors from the server will be handled according to any
|
||||||
|
// RetryConfig the client was created with.
|
||||||
|
func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
|
||||||
|
// The otlpmetric.Exporter synchronizes access to client methods, and
|
||||||
|
// ensures this is not called after the Exporter is shutdown. Only thing
|
||||||
|
// to do here is send data.
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Do not upload if the context is already expired.
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := c.exportContext(ctx)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
return c.requestFunc(ctx, func(iCtx context.Context) error {
|
||||||
|
resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{
|
||||||
|
ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
|
||||||
|
})
|
||||||
|
if resp != nil && resp.PartialSuccess != nil {
|
||||||
|
msg := resp.PartialSuccess.GetErrorMessage()
|
||||||
|
n := resp.PartialSuccess.GetRejectedDataPoints()
|
||||||
|
if n != 0 || msg != "" {
|
||||||
|
err := internal.MetricPartialSuccessError(n, msg)
|
||||||
|
otel.Handle(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// nil is converted to OK.
|
||||||
|
if status.Code(err) == codes.OK {
|
||||||
|
// Success.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// exportContext returns a copy of parent with an appropriate deadline and
|
||||||
|
// cancellation function based on the clients configured export timeout.
|
||||||
|
//
|
||||||
|
// It is the callers responsibility to cancel the returned context once its
|
||||||
|
// use is complete, via the parent or directly with the returned CancelFunc, to
|
||||||
|
// ensure all resources are correctly released.
|
||||||
|
func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
|
||||||
|
var (
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
)
|
||||||
|
|
||||||
|
if c.exportTimeout > 0 {
|
||||||
|
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
|
||||||
|
} else {
|
||||||
|
ctx, cancel = context.WithCancel(parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.metadata.Len() > 0 {
|
||||||
|
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctx, cancel
|
||||||
|
}
|
||||||
|
|
||||||
|
// retryable returns if err identifies a request that can be retried and a
|
||||||
|
// duration to wait for if an explicit throttle time is included in err.
|
||||||
|
func retryable(err error) (bool, time.Duration) {
|
||||||
|
s := status.Convert(err)
|
||||||
|
return retryableGRPCStatus(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
|
||||||
|
switch s.Code() {
|
||||||
|
case codes.Canceled,
|
||||||
|
codes.DeadlineExceeded,
|
||||||
|
codes.Aborted,
|
||||||
|
codes.OutOfRange,
|
||||||
|
codes.Unavailable,
|
||||||
|
codes.DataLoss:
|
||||||
|
// Additionally, handle RetryInfo.
|
||||||
|
_, d := throttleDelay(s)
|
||||||
|
return true, d
|
||||||
|
case codes.ResourceExhausted:
|
||||||
|
// Retry only if the server signals that the recovery from resource exhaustion is possible.
|
||||||
|
return throttleDelay(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not a retry-able error.
|
||||||
|
return false, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// throttleDelay returns if the status is RetryInfo
|
||||||
|
// and the duration to wait for if an explicit throttle time is included.
|
||||||
|
func throttleDelay(s *status.Status) (bool, time.Duration) {
|
||||||
|
for _, detail := range s.Details() {
|
||||||
|
if t, ok := detail.(*errdetails.RetryInfo); ok {
|
||||||
|
return true, t.RetryDelay.AsDuration()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, 0
|
||||||
|
}
|
250
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
generated
vendored
Normal file
250
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
generated
vendored
Normal file
|
@ -0,0 +1,250 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Option applies a configuration option to the Exporter.
|
||||||
|
type Option interface {
|
||||||
|
applyGRPCOption(oconf.Config) oconf.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func asGRPCOptions(opts []Option) []oconf.GRPCOption {
|
||||||
|
converted := make([]oconf.GRPCOption, len(opts))
|
||||||
|
for i, o := range opts {
|
||||||
|
converted[i] = oconf.NewGRPCOption(o.applyGRPCOption)
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryConfig defines configuration for retrying the export of metric data
|
||||||
|
// that failed.
|
||||||
|
//
|
||||||
|
// This configuration does not define any network retry strategy. That is
|
||||||
|
// entirely handled by the gRPC ClientConn.
|
||||||
|
type RetryConfig retry.Config
|
||||||
|
|
||||||
|
type wrappedOption struct {
|
||||||
|
oconf.GRPCOption
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config {
|
||||||
|
return w.ApplyGRPCOption(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithInsecure disables client transport security for the Exporter's gRPC
|
||||||
|
// connection, just like grpc.WithInsecure()
|
||||||
|
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||||
|
// environment variable is set, and this option is not passed, that variable
|
||||||
|
// value will be used to determine client security. If the endpoint has a
|
||||||
|
// scheme of "http" or "unix" client security will be disabled. If both are
|
||||||
|
// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, client security will be used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithInsecure() Option {
|
||||||
|
return wrappedOption{oconf.WithInsecure()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEndpoint sets the target endpoint the Exporter will connect to.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||||
|
// environment variable is set, and this option is not passed, that variable
|
||||||
|
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||||
|
// will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, "localhost:4317" will be used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithEndpoint(endpoint string) Option {
|
||||||
|
return wrappedOption{oconf.WithEndpoint(endpoint)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithReconnectionPeriod set the minimum amount of time between connection
|
||||||
|
// attempts to the target endpoint.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithReconnectionPeriod(rp time.Duration) Option {
|
||||||
|
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||||
|
cfg.ReconnectionPeriod = rp
|
||||||
|
return cfg
|
||||||
|
})}
|
||||||
|
}
|
||||||
|
|
||||||
|
func compressorToCompression(compressor string) oconf.Compression {
|
||||||
|
if compressor == "gzip" {
|
||||||
|
return oconf.GzipCompression
|
||||||
|
}
|
||||||
|
|
||||||
|
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
|
||||||
|
return oconf.NoCompression
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCompressor sets the compressor the gRPC client uses.
|
||||||
|
// Supported compressor values: "gzip".
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_COMPRESSION or
|
||||||
|
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
|
||||||
|
// this option is not passed, that variable value will be used. That value can
|
||||||
|
// be either "none" or "gzip". If both are set,
|
||||||
|
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, no compressor will be used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithCompressor(compressor string) Option {
|
||||||
|
return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHeaders will send the provided headers with each gRPC requests.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
|
||||||
|
// environment variable is set, and this option is not passed, that variable
|
||||||
|
// value will be used. The value will be parsed as a list of key value pairs.
|
||||||
|
// These pairs are expected to be in the W3C Correlation-Context format
|
||||||
|
// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
|
||||||
|
// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, no user headers will be set.
|
||||||
|
func WithHeaders(headers map[string]string) Option {
|
||||||
|
return wrappedOption{oconf.WithHeaders(headers)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTLSCredentials sets the gRPC connection to use creds.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
|
||||||
|
// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
|
||||||
|
// this option is not passed, that variable value will be used. The value will
|
||||||
|
// be parsed the filepath of the TLS certificate chain to use. If both are
|
||||||
|
// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, no TLS credentials will be used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
|
||||||
|
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||||
|
cfg.Metrics.GRPCCredentials = creds
|
||||||
|
return cfg
|
||||||
|
})}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithServiceConfig defines the default gRPC service config used.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithServiceConfig(serviceConfig string) Option {
|
||||||
|
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||||
|
cfg.ServiceConfig = serviceConfig
|
||||||
|
return cfg
|
||||||
|
})}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDialOption sets explicit grpc.DialOptions to use when establishing a
|
||||||
|
// gRPC connection. The options here are appended to the internal grpc.DialOptions
|
||||||
|
// used so they will take precedence over any other internal grpc.DialOptions
|
||||||
|
// they might conflict with.
|
||||||
|
//
|
||||||
|
// This option has no effect if WithGRPCConn is used.
|
||||||
|
func WithDialOption(opts ...grpc.DialOption) Option {
|
||||||
|
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||||
|
cfg.DialOptions = opts
|
||||||
|
return cfg
|
||||||
|
})}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
|
||||||
|
//
|
||||||
|
// This option takes precedence over any other option that relates to
|
||||||
|
// establishing or persisting a gRPC connection to a target endpoint. Any
|
||||||
|
// other option of those types passed will be ignored.
|
||||||
|
//
|
||||||
|
// It is the callers responsibility to close the passed conn. The Exporter
|
||||||
|
// Shutdown method will not close this connection.
|
||||||
|
func WithGRPCConn(conn *grpc.ClientConn) Option {
|
||||||
|
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||||
|
cfg.GRPCConn = conn
|
||||||
|
return cfg
|
||||||
|
})}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTimeout sets the max amount of time an Exporter will attempt an export.
|
||||||
|
//
|
||||||
|
// This takes precedence over any retry settings defined by WithRetry. Once
|
||||||
|
// this time limit has been reached the export is abandoned and the metric
|
||||||
|
// data is dropped.
|
||||||
|
//
|
||||||
|
// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
|
||||||
|
// environment variable is set, and this option is not passed, that variable
|
||||||
|
// value will be used. The value will be parsed as an integer representing the
|
||||||
|
// timeout in milliseconds. If both are set,
|
||||||
|
// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
|
||||||
|
//
|
||||||
|
// By default, if an environment variable is not set, and this option is not
|
||||||
|
// passed, a timeout of 10 seconds will be used.
|
||||||
|
func WithTimeout(duration time.Duration) Option {
|
||||||
|
return wrappedOption{oconf.WithTimeout(duration)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRetry sets the retry policy for transient retryable errors that are
|
||||||
|
// returned by the target endpoint.
|
||||||
|
//
|
||||||
|
// If the target endpoint responds with not only a retryable error, but
|
||||||
|
// explicitly returns a backoff time in the response, that time will take
|
||||||
|
// precedence over these settings.
|
||||||
|
//
|
||||||
|
// These settings do not define any network retry strategy. That is entirely
|
||||||
|
// handled by the gRPC ClientConn.
|
||||||
|
//
|
||||||
|
// If unset, the default retry policy will be used. It will retry the export
|
||||||
|
// 5 seconds after receiving a retryable error and increase exponentially
|
||||||
|
// after each error for no more than a total time of 1 minute.
|
||||||
|
func WithRetry(settings RetryConfig) Option {
|
||||||
|
return wrappedOption{oconf.WithRetry(retry.Config(settings))}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTemporalitySelector sets the TemporalitySelector the client will use to
|
||||||
|
// determine the Temporality of an instrument based on its kind. If this option
|
||||||
|
// is not used, the client will use the DefaultTemporalitySelector from the
|
||||||
|
// go.opentelemetry.io/otel/sdk/metric package.
|
||||||
|
func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
|
||||||
|
return wrappedOption{oconf.WithTemporalitySelector(selector)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAggregationSelector sets the AggregationSelector the client will use to
|
||||||
|
// determine the aggregation to use for an instrument based on its kind. If
|
||||||
|
// this option is not used, the reader will use the DefaultAggregationSelector
|
||||||
|
// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
|
||||||
|
// explicitly passed for a view matching an instrument.
|
||||||
|
func WithAggregationSelector(selector metric.AggregationSelector) Option {
|
||||||
|
return wrappedOption{oconf.WithAggregationSelector(selector)}
|
||||||
|
}
|
96
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
generated
vendored
Normal file
96
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,96 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC.
|
||||||
|
By default the telemetry is sent to https://localhost:4317.
|
||||||
|
|
||||||
|
Exporter should be created using [New] and used with a [metric.PeriodicReader].
|
||||||
|
|
||||||
|
The environment variables described below can be used for configuration.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") -
|
||||||
|
target to which the exporter sends telemetry.
|
||||||
|
The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||||
|
The value must contain a host.
|
||||||
|
The value may additionally a port, a scheme, and a path.
|
||||||
|
The value accepts "http" and "https" scheme.
|
||||||
|
The value should not contain a query string or fragment.
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
|
||||||
|
The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") -
|
||||||
|
setting "true" disables client transport security for the exporter's gRPC connection.
|
||||||
|
You can use this only when an endpoint is provided without the http or https scheme.
|
||||||
|
OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT setting overrides
|
||||||
|
the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT.
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
|
||||||
|
The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
|
||||||
|
key-value pairs used as gRPC metadata associated with gRPC requests.
|
||||||
|
The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format],
|
||||||
|
except that additional semi-colon delimited metadata is not supported.
|
||||||
|
Example value: "key1=value1,key2=value2".
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
|
||||||
|
The configuration can be overridden by [WithHeaders] option.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
|
||||||
|
maximum time in milliseconds the OTLP exporter waits for each batch export.
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
|
||||||
|
The configuration can be overridden by [WithTimeout] option.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
|
||||||
|
the gRPC compressor the exporter uses.
|
||||||
|
Supported value: "gzip".
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
|
||||||
|
The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
|
||||||
|
the filepath to the trusted certificate to use when verifying a server's TLS credentials.
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
|
||||||
|
The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
|
||||||
|
the filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format.
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
|
||||||
|
The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
|
||||||
|
the filepath to the clients private key to use in mTLS communication in PEM format.
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
|
||||||
|
The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
|
||||||
|
aggregation temporality to use on the basis of instrument kind. Supported values:
|
||||||
|
- "cumulative" - Cumulative aggregation temporality for all instrument kinds,
|
||||||
|
- "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
|
||||||
|
Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
|
||||||
|
- "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
|
||||||
|
Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
|
||||||
|
|
||||||
|
The configuration can be overridden by [WithTemporalitySelector] option.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
|
||||||
|
default aggregation to use for histogram instruments. Supported values:
|
||||||
|
- "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
|
||||||
|
- "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
|
||||||
|
|
||||||
|
The configuration can be overridden by [WithAggregationSelector] option.
|
||||||
|
|
||||||
|
[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
|
||||||
|
[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
|
||||||
|
[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
|
||||||
|
*/
|
||||||
|
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
167
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
generated
vendored
Normal file
167
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go
generated
vendored
Normal file
|
@ -0,0 +1,167 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Exporter is a OpenTelemetry metric Exporter using gRPC.
|
||||||
|
type Exporter struct {
|
||||||
|
// Ensure synchronous access to the client across all functionality.
|
||||||
|
clientMu sync.Mutex
|
||||||
|
client interface {
|
||||||
|
UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
|
||||||
|
Shutdown(context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
temporalitySelector metric.TemporalitySelector
|
||||||
|
aggregationSelector metric.AggregationSelector
|
||||||
|
|
||||||
|
shutdownOnce sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
|
||||||
|
ts := cfg.Metrics.TemporalitySelector
|
||||||
|
if ts == nil {
|
||||||
|
ts = func(metric.InstrumentKind) metricdata.Temporality {
|
||||||
|
return metricdata.CumulativeTemporality
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
as := cfg.Metrics.AggregationSelector
|
||||||
|
if as == nil {
|
||||||
|
as = metric.DefaultAggregationSelector
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Exporter{
|
||||||
|
client: c,
|
||||||
|
|
||||||
|
temporalitySelector: ts,
|
||||||
|
aggregationSelector: as,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Temporality returns the Temporality to use for an instrument kind. It
// delegates to the selector configured at construction time.
func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
	return e.temporalitySelector(k)
}
|
||||||
|
|
||||||
|
// Aggregation returns the Aggregation to use for an instrument kind. It
// delegates to the selector configured at construction time.
func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
	return e.aggregationSelector(k)
}
|
||||||
|
|
||||||
|
// Export transforms and transmits metric data to an OTLP receiver.
//
// The upload is best-effort: metrics that fail to transform are dropped,
// the remainder are still uploaded, and the transform error is merged into
// the returned error alongside any upload error.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
	defer global.Debug("OTLP/gRPC exporter export", "Data", rm)

	// err is intentionally inspected only after the upload attempt.
	otlpRm, err := transform.ResourceMetrics(rm)
	// Best effort upload of transformable metrics.
	e.clientMu.Lock()
	upErr := e.client.UploadMetrics(ctx, otlpRm)
	e.clientMu.Unlock()
	if upErr != nil {
		if err == nil {
			return fmt.Errorf("failed to upload metrics: %w", upErr)
		}
		// Merge the two errors.
		return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
	}
	// Upload succeeded; report any transform error (nil on full success).
	return err
}
|
||||||
|
|
||||||
|
// ForceFlush flushes any metric data held by an exporter.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
//
// This method is safe to call concurrently.
func (e *Exporter) ForceFlush(ctx context.Context) error {
	// The exporter and client hold no state, nothing to flush.
	// Only surface a context cancellation/deadline error, if any.
	return ctx.Err()
}
|
||||||
|
|
||||||
|
// Shutdown flushes all metric data held by an exporter and releases any held
// computational resources.
//
// The first call swaps the live client for a shutdownClient stub (so all
// subsequent exporter calls fail fast) and shuts the real client down.
// Later calls return errShutdown without touching the client again.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
//
// This method is safe to call concurrently.
func (e *Exporter) Shutdown(ctx context.Context) error {
	err := errShutdown
	e.shutdownOnce.Do(func() {
		e.clientMu.Lock()
		client := e.client
		e.client = shutdownClient{}
		e.clientMu.Unlock()
		err = client.Shutdown(ctx)
	})
	return err
}
|
||||||
|
|
||||||
|
// errShutdown is returned by exporter operations attempted after Shutdown.
var errShutdown = fmt.Errorf("gRPC exporter is shutdown")

// shutdownClient is the no-op client installed by Exporter.Shutdown. Every
// method fails with the context's error, if set, or errShutdown.
type shutdownClient struct{}

// err reports why an operation on a shut-down exporter failed: a context
// cancellation/deadline error takes precedence over errShutdown.
func (c shutdownClient) err(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return errShutdown
}

// UploadMetrics always fails; see err.
func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
	return c.err(ctx)
}

// Shutdown always fails; see err.
func (c shutdownClient) Shutdown(ctx context.Context) error {
	return c.err(ctx)
}
|
||||||
|
|
||||||
|
// MarshalLog returns logging data about the Exporter.
func (e *Exporter) MarshalLog() interface{} {
	return struct{ Type string }{Type: "OTLP/gRPC"}
}
|
||||||
|
|
||||||
|
// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
// endpoint using gRPC.
//
// If an already established gRPC ClientConn is not passed in options using
// WithGRPCConn, a connection to the OTLP endpoint will be established based
// on options. If a connection cannot be established in the lifetime of ctx,
// an error will be returned.
func New(ctx context.Context, options ...Option) (*Exporter, error) {
	// Resolve configuration from defaults, environment, then options.
	cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
	c, err := newClient(ctx, cfg)
	if err != nil {
		return nil, err
	}
	return newExporter(c, cfg)
}
|
202
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
generated
vendored
Normal file
202
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigFn is the generic function used to set a config.
type ConfigFn func(*EnvOptionsReader)

// EnvOptionsReader reads the required environment variables.
type EnvOptionsReader struct {
	// GetEnv returns the value of an environment variable (injectable for tests).
	GetEnv func(string) string
	// ReadFile reads the named file (injectable for tests).
	ReadFile func(string) ([]byte, error)
	// Namespace is prepended (with an underscore) to every key lookup.
	Namespace string
}

// Apply runs every ConfigFn.
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
	for _, o := range opts {
		o(e)
	}
}

// GetEnvValue gets an OTLP environment variable value of the specified key
// using the GetEnv function.
// This function prepends the OTLP specified namespace to all key lookups.
// The returned bool is false when the variable is unset or whitespace-only.
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
	return v, v != ""
}
|
||||||
|
|
||||||
|
// WithString retrieves the specified config and passes it to ConfigFn as a
// string. fn is not called when the variable is unset or empty.
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			fn(v)
		}
	}
}
|
||||||
|
|
||||||
|
// WithBool returns a ConfigFn that reads the environment variable n and if it
// exists passes its parsed bool value to fn. Only a case-insensitive "true"
// parses as true; any other value yields false.
func WithBool(n string, fn func(bool)) ConfigFn {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			b := strings.ToLower(v) == "true"
			fn(b)
		}
	}
}
|
||||||
|
|
||||||
|
// WithDuration retrieves the specified config and passes it to ConfigFn as a
// duration. The value is interpreted as an integer number of milliseconds;
// unparsable values are logged and ignored.
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			d, err := strconv.Atoi(v)
			if err != nil {
				global.Error(err, "parse duration", "input", v)
				return
			}
			fn(time.Duration(d) * time.Millisecond)
		}
	}
}
|
||||||
|
|
||||||
|
// WithHeaders retrieves the specified config and passes it to ConfigFn as a
// map of HTTP headers, parsed from a comma-separated name=value list.
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			fn(stringToHeader(v))
		}
	}
}
|
||||||
|
|
||||||
|
// WithURL retrieves the specified config and passes it to ConfigFn as a
// net/url.URL. Unparsable values are logged and ignored.
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			u, err := url.Parse(v)
			if err != nil {
				global.Error(err, "parse url", "input", v)
				return
			}
			fn(u)
		}
	}
}
|
||||||
|
|
||||||
|
// WithCertPool returns a ConfigFn that reads the environment variable n as a
// filepath to a TLS certificate pool. If it exists, it is parsed as a
// crypto/x509.CertPool and it is passed to fn. Read or parse failures are
// logged and the callback is skipped.
func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			b, err := e.ReadFile(v)
			if err != nil {
				global.Error(err, "read tls ca cert file", "file", v)
				return
			}
			c, err := createCertPool(b)
			if err != nil {
				global.Error(err, "create tls cert pool")
				return
			}
			fn(c)
		}
	}
}
|
||||||
|
|
||||||
|
// WithClientCert returns a ConfigFn that reads the environment variables nc
// and nk as filepaths to a client certificate and key pair. If they exist,
// they are parsed as a crypto/tls.Certificate and it is passed to fn.
// Both variables must be set; read or parse failures are logged and the
// callback is skipped.
func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
	return func(e *EnvOptionsReader) {
		vc, okc := e.GetEnvValue(nc)
		vk, okk := e.GetEnvValue(nk)
		if !okc || !okk {
			return
		}
		cert, err := e.ReadFile(vc)
		if err != nil {
			global.Error(err, "read tls client cert", "file", vc)
			return
		}
		key, err := e.ReadFile(vk)
		if err != nil {
			global.Error(err, "read tls client key", "file", vk)
			return
		}
		crt, err := tls.X509KeyPair(cert, key)
		if err != nil {
			global.Error(err, "create tls client key pair")
			return
		}
		fn(crt)
	}
}
|
||||||
|
|
||||||
|
// keyWithNamespace joins the namespace ns and key with an underscore.
// An empty namespace returns key unchanged.
func keyWithNamespace(ns, key string) string {
	if ns != "" {
		return fmt.Sprintf("%s_%s", ns, key)
	}
	return key
}
|
||||||
|
|
||||||
|
func stringToHeader(value string) map[string]string {
|
||||||
|
headersPairs := strings.Split(value, ",")
|
||||||
|
headers := make(map[string]string)
|
||||||
|
|
||||||
|
for _, header := range headersPairs {
|
||||||
|
n, v, found := strings.Cut(header, "=")
|
||||||
|
if !found {
|
||||||
|
global.Error(errors.New("missing '="), "parse headers", "input", header)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name, err := url.PathUnescape(n)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "escape header key", "key", n)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
trimmedName := strings.TrimSpace(name)
|
||||||
|
value, err := url.PathUnescape(v)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "escape header value", "value", v)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
trimmedValue := strings.TrimSpace(value)
|
||||||
|
|
||||||
|
headers[trimmedName] = trimmedValue
|
||||||
|
}
|
||||||
|
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
|
// createCertPool builds an x509.CertPool from PEM-encoded certificate data.
// It returns an error when no certificate could be added from certBytes.
func createCertPool(certBytes []byte) (*x509.CertPool, error) {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(certBytes) {
		return nil, errors.New("failed to append certificate to the cert pool")
	}
	return pool, nil
}
|
42
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
generated
vendored
Normal file
42
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
|
221
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
generated
vendored
Normal file
221
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
generated
vendored
Normal file
|
@ -0,0 +1,221 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultEnvOptionsReader is the default environments reader. It reads
// OTEL_EXPORTER_OTLP_*-namespaced variables from the process environment
// and files from the local filesystem.
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
	GetEnv:    os.Getenv,
	ReadFile:  os.ReadFile,
	Namespace: "OTEL_EXPORTER_OTLP",
}
|
||||||
|
|
||||||
|
// ApplyGRPCEnvConfigs applies the env configurations for gRPC, layering each
// environment-derived option onto cfg in order, and returns the result.
func ApplyGRPCEnvConfigs(cfg Config) Config {
	opts := getOptionsFromEnv()
	for _, opt := range opts {
		cfg = opt.ApplyGRPCOption(cfg)
	}
	return cfg
}
|
||||||
|
|
||||||
|
// ApplyHTTPEnvConfigs applies the env configurations for HTTP, layering each
// environment-derived option onto cfg in order, and returns the result.
func ApplyHTTPEnvConfigs(cfg Config) Config {
	opts := getOptionsFromEnv()
	for _, opt := range opts {
		cfg = opt.ApplyHTTPOption(cfg)
	}
	return cfg
}
|
||||||
|
|
||||||
|
// getOptionsFromEnv builds the list of GenericOptions described by the
// OTEL_EXPORTER_OTLP_* environment variables. Signal-generic variables are
// read before their METRICS_-specific counterparts, so per-signal settings
// override the generic ones.
func getOptionsFromEnv() []GenericOption {
	opts := []GenericOption{}

	// tlsConf accumulates CA and client-cert settings; it is only turned
	// into an option by withTLSConfig if something was actually set.
	tlsConf := &tls.Config{}
	DefaultEnvOptionsReader.Apply(
		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
			opts = append(opts, withEndpointScheme(u))
			opts = append(opts, newSplitOption(func(cfg Config) Config {
				cfg.Metrics.Endpoint = u.Host
				// For OTLP/HTTP endpoint URLs without a per-signal
				// configuration, the passed endpoint is used as a base URL
				// and the signals are sent to these paths relative to that.
				cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
				return cfg
			}, withEndpointForGRPC(u)))
		}),
		envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
			opts = append(opts, withEndpointScheme(u))
			opts = append(opts, newSplitOption(func(cfg Config) Config {
				cfg.Metrics.Endpoint = u.Host
				// For endpoint URLs for OTLP/HTTP per-signal variables, the
				// URL MUST be used as-is without any modification. The only
				// exception is that if an URL contains no path part, the root
				// path / MUST be used.
				path := u.Path
				if path == "" {
					path = "/"
				}
				cfg.Metrics.URLPath = path
				return cfg
			}, withEndpointForGRPC(u)))
		}),
		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
		envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
		envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
		envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
		envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
		WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
		envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
		withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
		withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
	)

	return opts
}
|
||||||
|
|
||||||
|
// withEndpointForGRPC returns the gRPC half of a split option that sets the
// metrics endpoint from u.
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
	return func(cfg Config) Config {
		// For OTLP/gRPC endpoints, this is the target to which the
		// exporter is going to send telemetry.
		cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
		return cfg
	}
}
|
||||||
|
|
||||||
|
// WithEnvCompression retrieves the specified config and passes it to ConfigFn
// as a Compression. Only "gzip" maps to GzipCompression; any other value is
// treated as NoCompression.
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
	return func(e *envconfig.EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			cp := NoCompression
			if v == "gzip" {
				cp = GzipCompression
			}

			fn(cp)
		}
	}
}
|
||||||
|
|
||||||
|
// withEndpointScheme maps u's scheme to a transport-security option:
// "http" and "unix" select an insecure connection, anything else secure.
func withEndpointScheme(u *url.URL) GenericOption {
	switch strings.ToLower(u.Scheme) {
	case "http", "unix":
		return WithInsecure()
	default:
		return WithSecure()
	}
}
|
||||||
|
|
||||||
|
// withInsecure converts the boolean *_INSECURE environment value into the
// corresponding transport-security option.
// revive:disable-next-line:flag-parameter
func withInsecure(b bool) GenericOption {
	if b {
		return WithInsecure()
	}
	return WithSecure()
}
|
||||||
|
|
||||||
|
// withTLSConfig invokes fn with c only when the environment actually
// populated it (a root CA pool or at least one client certificate).
func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
	return func(e *envconfig.EnvOptionsReader) {
		if c.RootCAs != nil || len(c.Certificates) > 0 {
			fn(c)
		}
	}
}
|
||||||
|
|
||||||
|
// withEnvTemporalityPreference maps the environment variable n to one of the
// named temporality selectors ("cumulative", "delta", "lowmemory",
// case-insensitive) and passes it to fn. Invalid values are logged and ignored.
func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
	return func(e *envconfig.EnvOptionsReader) {
		if s, ok := e.GetEnvValue(n); ok {
			switch strings.ToLower(s) {
			case "cumulative":
				fn(cumulativeTemporality)
			case "delta":
				fn(deltaTemporality)
			case "lowmemory":
				fn(lowMemory)
			default:
				global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// cumulativeTemporality selects cumulative temporality for every instrument kind.
func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
	return metricdata.CumulativeTemporality
}
|
||||||
|
|
||||||
|
// deltaTemporality selects delta temporality for counters, histograms, and
// observable counters; every other instrument kind reports cumulative.
func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
	switch ik {
	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
		return metricdata.DeltaTemporality
	default:
		return metricdata.CumulativeTemporality
	}
}
|
||||||
|
|
||||||
|
// lowMemory selects delta temporality for synchronous counters and
// histograms only; every other instrument kind reports cumulative.
func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
	switch ik {
	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
		return metricdata.DeltaTemporality
	default:
		return metricdata.CumulativeTemporality
	}
}
|
||||||
|
|
||||||
|
// withEnvAggPreference maps the environment variable n to a default histogram
// aggregation selector ("explicit_bucket_histogram" or
// "base2_exponential_bucket_histogram", case-insensitive) and passes it to fn.
// Invalid values are logged and ignored.
func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
	return func(e *envconfig.EnvOptionsReader) {
		if s, ok := e.GetEnvValue(n); ok {
			switch strings.ToLower(s) {
			case "explicit_bucket_histogram":
				fn(metric.DefaultAggregationSelector)
			case "base2_exponential_bucket_histogram":
				fn(func(kind metric.InstrumentKind) metric.Aggregation {
					if kind == metric.InstrumentKindHistogram {
						// Fixed parameters for the exponential histogram.
						return metric.AggregationBase2ExponentialHistogram{
							MaxSize:  160,
							MaxScale: 20,
							NoMinMax: false,
						}
					}
					// Non-histogram kinds keep the default aggregation.
					return metric.DefaultAggregationSelector(kind)
				})
			default:
				global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
			}
		}
	}
}
|
353
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
generated
vendored
Normal file
353
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
generated
vendored
Normal file
|
@ -0,0 +1,353 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/backoff"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
"google.golang.org/grpc/encoding/gzip"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// DefaultMaxAttempts describes how many times the driver
	// should retry the sending of the payload in case of a
	// retryable error.
	DefaultMaxAttempts int = 5
	// DefaultMetricsPath is a default URL path for endpoint that
	// receives metrics.
	DefaultMetricsPath string = "/v1/metrics"
	// DefaultBackoff is a default base backoff time used in the
	// exponential backoff strategy.
	DefaultBackoff time.Duration = 300 * time.Millisecond
	// DefaultTimeout is a default max waiting time for the backend to process
	// each span or metrics batch.
	DefaultTimeout time.Duration = 10 * time.Second
)
|
||||||
|
|
||||||
|
type (
	// SignalConfig holds the per-signal (metrics) exporter settings.
	SignalConfig struct {
		Endpoint    string
		Insecure    bool
		TLSCfg      *tls.Config
		Headers     map[string]string
		Compression Compression
		Timeout     time.Duration
		URLPath     string

		// gRPC configurations
		GRPCCredentials credentials.TransportCredentials

		TemporalitySelector metric.TemporalitySelector
		AggregationSelector metric.AggregationSelector
	}

	// Config is the full exporter configuration: the metrics signal
	// settings plus transport-wide retry and gRPC connection options.
	Config struct {
		// Signal specific configurations
		Metrics SignalConfig

		RetryConfig retry.Config

		// gRPC configurations
		ReconnectionPeriod time.Duration
		ServiceConfig      string
		DialOptions        []grpc.DialOption
		GRPCConn           *grpc.ClientConn
	}
)
|
||||||
|
|
||||||
|
// NewHTTPConfig returns a new Config with all settings applied from opts and
// any unset setting using the default HTTP config values. Precedence, lowest
// to highest: built-in defaults, environment variables, then opts.
func NewHTTPConfig(opts ...HTTPOption) Config {
	cfg := Config{
		Metrics: SignalConfig{
			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
			URLPath:     DefaultMetricsPath,
			Compression: NoCompression,
			Timeout:     DefaultTimeout,

			TemporalitySelector: metric.DefaultTemporalitySelector,
			AggregationSelector: metric.DefaultAggregationSelector,
		},
		RetryConfig: retry.DefaultConfig,
	}
	cfg = ApplyHTTPEnvConfigs(cfg)
	for _, opt := range opts {
		cfg = opt.ApplyHTTPOption(cfg)
	}
	// Normalize whatever URL path the env/options produced.
	cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
	return cfg
}
|
||||||
|
|
||||||
|
// cleanPath trims surrounding whitespace from urlPath and collapses any
// redundant path elements. When the result is empty (path.Clean yields "."),
// defaultPath is returned; a relative result is made absolute with a leading
// slash.
func cleanPath(urlPath string, defaultPath string) string {
	cleaned := path.Clean(strings.TrimSpace(urlPath))
	switch {
	case cleaned == ".":
		return defaultPath
	case path.IsAbs(cleaned):
		return cleaned
	default:
		return fmt.Sprintf("/%s", cleaned)
	}
}
|
||||||
|
|
||||||
|
// NewGRPCConfig returns a new Config with all settings applied from opts and
// any unset setting using the default gRPC config values.
func NewGRPCConfig(opts ...GRPCOption) Config {
	cfg := Config{
		Metrics: SignalConfig{
			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
			URLPath:     DefaultMetricsPath,
			Compression: NoCompression,
			Timeout:     DefaultTimeout,

			TemporalitySelector: metric.DefaultTemporalitySelector,
			AggregationSelector: metric.DefaultAggregationSelector,
		},
		RetryConfig: retry.DefaultConfig,
	}
	// Environment variables are applied before explicit options so that
	// caller-supplied options take precedence.
	cfg = ApplyGRPCEnvConfigs(cfg)
	for _, opt := range opts {
		cfg = opt.ApplyGRPCOption(cfg)
	}

	if cfg.ServiceConfig != "" {
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
	}
	// Prioritize GRPCCredentials over Insecure (passing both is an error).
	if cfg.Metrics.GRPCCredentials != nil {
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
	} else if cfg.Metrics.Insecure {
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
	} else {
		// Default to using the host's root CA.
		creds := credentials.NewTLS(nil)
		cfg.Metrics.GRPCCredentials = creds
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
	}
	if cfg.Metrics.Compression == GzipCompression {
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
	}
	if cfg.ReconnectionPeriod != 0 {
		p := grpc.ConnectParams{
			Backoff:           backoff.DefaultConfig,
			MinConnectTimeout: cfg.ReconnectionPeriod,
		}
		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
	}

	return cfg
}
|
||||||
|
|
||||||
|
type (
	// GenericOption applies an option to the HTTP or gRPC driver.
	GenericOption interface {
		// ApplyHTTPOption returns cfg with the option applied for the
		// HTTP driver.
		ApplyHTTPOption(Config) Config
		// ApplyGRPCOption returns cfg with the option applied for the
		// gRPC driver.
		ApplyGRPCOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}

	// HTTPOption applies an option to the HTTP driver.
	HTTPOption interface {
		ApplyHTTPOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}

	// GRPCOption applies an option to the gRPC driver.
	GRPCOption interface {
		ApplyGRPCOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}
)
|
||||||
|
|
||||||
|
// genericOption is an option that applies the same logic
|
||||||
|
// for both gRPC and HTTP.
|
||||||
|
type genericOption struct {
|
||||||
|
fn func(Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
|
||||||
|
return g.fn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
|
||||||
|
return g.fn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (genericOption) private() {}
|
||||||
|
|
||||||
|
func newGenericOption(fn func(cfg Config) Config) GenericOption {
|
||||||
|
return &genericOption{fn: fn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// splitOption is an option that applies different logics
|
||||||
|
// for gRPC and HTTP.
|
||||||
|
type splitOption struct {
|
||||||
|
httpFn func(Config) Config
|
||||||
|
grpcFn func(Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
|
||||||
|
return g.grpcFn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
|
||||||
|
return g.httpFn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (splitOption) private() {}
|
||||||
|
|
||||||
|
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
|
||||||
|
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// httpOption is an option that is only applied to the HTTP driver.
|
||||||
|
type httpOption struct {
|
||||||
|
fn func(Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
|
||||||
|
return h.fn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (httpOption) private() {}
|
||||||
|
|
||||||
|
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
|
||||||
|
return &httpOption{fn: fn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// grpcOption is an option that is only applied to the gRPC driver.
|
||||||
|
type grpcOption struct {
|
||||||
|
fn func(Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
|
||||||
|
return h.fn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (grpcOption) private() {}
|
||||||
|
|
||||||
|
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
|
||||||
|
return &grpcOption{fn: fn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generic Options
|
||||||
|
|
||||||
|
func WithEndpoint(endpoint string) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Endpoint = endpoint
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithCompression(compression Compression) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Compression = compression
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithURLPath(urlPath string) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.URLPath = urlPath
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithRetry(rc retry.Config) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.RetryConfig = rc
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
|
||||||
|
return newSplitOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.TLSCfg = tlsCfg.Clone()
|
||||||
|
return cfg
|
||||||
|
}, func(cfg Config) Config {
|
||||||
|
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithInsecure() GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Insecure = true
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithSecure() GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Insecure = false
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithHeaders(headers map[string]string) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Headers = headers
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTimeout(duration time.Duration) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Timeout = duration
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.TemporalitySelector = selector
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.AggregationSelector = selector
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
58
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
generated
vendored
Normal file
58
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
const (
	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
	DefaultCollectorGRPCPort uint16 = 4317
	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
	DefaultCollectorHTTPPort uint16 = 4318
	// DefaultCollectorHost is the host address the Exporter will attempt
	// to connect to if no collector address is provided.
	DefaultCollectorHost string = "localhost"
)

// Compression describes the compression used for payloads sent to the
// collector.
type Compression int

const (
	// NoCompression tells the driver to send payloads without
	// compression.
	NoCompression Compression = iota
	// GzipCompression tells the driver to send payloads after
	// compressing them with gzip.
	GzipCompression
)
|
||||||
|
|
||||||
|
// RetrySettings defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type RetrySettings struct {
	// Enabled indicates whether batches should be retried in case of
	// export failure.
	Enabled bool
	// InitialInterval is the time to wait after the first failure before retrying.
	InitialInterval time.Duration
	// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
	// consecutive retries will always be `MaxInterval`.
	MaxInterval time.Duration
	// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
	// Once this value is reached, the data is discarded.
	MaxElapsedTime time.Duration
}
|
49
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
generated
vendored
Normal file
49
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReadTLSConfigFromFile reads a PEM certificate file and creates a
// tls.Config that will use this certificate to verify a server certificate.
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
	pemBytes, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return CreateTLSConfig(pemBytes)
}

// CreateTLSConfig creates a tls.Config from raw PEM certificate bytes to
// verify a server certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(certBytes) {
		return nil, errors.New("failed to append certificate to the cert pool")
	}
	return &tls.Config{RootCAs: pool}, nil
}
|
67
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
generated
vendored
Normal file
67
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
generated
vendored
Normal file
|
@ -0,0 +1,67 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/partialsuccess.go
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// PartialSuccess represents the underlying error for all handling
// OTLP partial success messages. Use `errors.Is(err,
// PartialSuccess{})` to test whether an error passed to the OTel
// error handler belongs to this category.
type PartialSuccess struct {
	ErrorMessage  string
	RejectedItems int64
	RejectedKind  string
}

var _ error = PartialSuccess{}

// Error implements the error interface.
func (ps PartialSuccess) Error() string {
	message := ps.ErrorMessage
	if len(message) == 0 {
		message = "empty message"
	}
	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", message, ps.RejectedItems, ps.RejectedKind)
}

// Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool {
	switch err.(type) {
	case PartialSuccess:
		return true
	default:
		return false
	}
}

// TracePartialSuccessError returns an error describing a partial success
// response for the trace signal.
func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
	ps := PartialSuccess{
		ErrorMessage:  errorMessage,
		RejectedItems: itemsRejected,
		RejectedKind:  "spans",
	}
	return ps
}

// MetricPartialSuccessError returns an error describing a partial success
// response for the metric signal.
func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
	ps := PartialSuccess{
		ErrorMessage:  errorMessage,
		RejectedItems: itemsRejected,
		RejectedKind:  "metric data points",
	}
	return ps
}
|
156
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
generated
vendored
Normal file
156
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
generated
vendored
Normal file
|
@ -0,0 +1,156 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/retry/retry.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package retry provides request retry functionality that can perform
|
||||||
|
// configurable exponential backoff for transient errors and honor any
|
||||||
|
// explicit throttle responses received.
|
||||||
|
package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/cenkalti/backoff/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultConfig are the recommended defaults to use: retries enabled with a
// 5s initial backoff, capped at 30s per attempt and one minute overall.
var DefaultConfig = Config{
	Enabled:         true,
	InitialInterval: 5 * time.Second,
	MaxInterval:     30 * time.Second,
	MaxElapsedTime:  time.Minute,
}
|
||||||
|
|
||||||
|
// Config defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type Config struct {
	// Enabled indicates whether batches should be retried in case of
	// export failure.
	Enabled bool
	// InitialInterval is the time to wait after the first failure before
	// retrying.
	InitialInterval time.Duration
	// MaxInterval is the upper bound on backoff interval. Once this value is
	// reached the delay between consecutive retries will always be
	// `MaxInterval`.
	MaxInterval time.Duration
	// MaxElapsedTime is the maximum amount of time (including retries) spent
	// trying to send a request/batch. Once this value is reached, the data
	// is discarded.
	MaxElapsedTime time.Duration
}

// RequestFunc wraps a request with retry logic.
type RequestFunc func(context.Context, func(context.Context) error) error

// EvaluateFunc returns if an error is retry-able and if an explicit throttle
// duration should be honored that was included in the error.
//
// The function must return true if the error argument is retry-able,
// otherwise it must return false for the first return parameter.
//
// The function must return a non-zero time.Duration if the error contains
// explicit throttle duration that should be honored, otherwise it must return
// a zero valued time.Duration.
type EvaluateFunc func(error) (bool, time.Duration)
|
||||||
|
|
||||||
|
// RequestFunc returns a RequestFunc using the evaluate function to determine
// if requests can be retried and based on the exponential backoff
// configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
	if !c.Enabled {
		// Retries disabled: run the request exactly once.
		return func(ctx context.Context, fn func(context.Context) error) error {
			return fn(ctx)
		}
	}

	return func(ctx context.Context, fn func(context.Context) error) error {
		// Do not use NewExponentialBackOff since it calls Reset and the code here
		// must call Reset after changing the InitialInterval (this saves an
		// unnecessary call to Now).
		b := &backoff.ExponentialBackOff{
			InitialInterval:     c.InitialInterval,
			RandomizationFactor: backoff.DefaultRandomizationFactor,
			Multiplier:          backoff.DefaultMultiplier,
			MaxInterval:         c.MaxInterval,
			MaxElapsedTime:      c.MaxElapsedTime,
			Stop:                backoff.Stop,
			Clock:               backoff.SystemClock,
		}
		b.Reset()

		for {
			err := fn(ctx)
			if err == nil {
				// Success: no retry needed.
				return nil
			}

			// Ask the caller whether this error is retryable and whether it
			// carries an explicit throttle delay that must be honored.
			retryable, throttle := evaluate(err)
			if !retryable {
				return err
			}

			bOff := b.NextBackOff()
			if bOff == backoff.Stop {
				// The backoff budget (MaxElapsedTime) is exhausted.
				return fmt.Errorf("max retry time elapsed: %w", err)
			}

			// Wait for the greater of the backoff or throttle delay.
			var delay time.Duration
			if bOff > throttle {
				delay = bOff
			} else {
				// Honoring the throttle may push us past the elapsed-time
				// budget; give up early rather than sleep pointlessly.
				elapsed := b.GetElapsedTime()
				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
					return fmt.Errorf("max retry time would elapse: %w", err)
				}
				delay = throttle
			}

			// waitFunc sleeps for delay but aborts if ctx is cancelled first.
			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
				return fmt.Errorf("%w: %s", ctxErr, err)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// Allow override for testing.
|
||||||
|
var waitFunc = wait
|
||||||
|
|
||||||
|
// wait takes the caller's context, and the amount of time to wait. It will
|
||||||
|
// return nil if the timer fires before or at the same time as the context's
|
||||||
|
// deadline. This indicates that the call can be retried.
|
||||||
|
func wait(ctx context.Context, delay time.Duration) error {
|
||||||
|
timer := time.NewTimer(delay)
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Handle the case where the timer and context deadline end
|
||||||
|
// simultaneously by prioritizing the timer expiration nil value
|
||||||
|
// response.
|
||||||
|
select {
|
||||||
|
case <-timer.C:
|
||||||
|
default:
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
case <-timer.C:
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
155
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
generated
vendored
Normal file
155
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
generated
vendored
Normal file
|
@ -0,0 +1,155 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AttrIter transforms an attribute iterator into OTLP key-values.
|
||||||
|
func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
|
||||||
|
l := iter.Len()
|
||||||
|
if l == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*cpb.KeyValue, 0, l)
|
||||||
|
for iter.Next() {
|
||||||
|
out = append(out, KeyValue(iter.Attribute()))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
|
||||||
|
func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
|
||||||
|
if len(attrs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*cpb.KeyValue, 0, len(attrs))
|
||||||
|
for _, kv := range attrs {
|
||||||
|
out = append(out, KeyValue(kv))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyValue transforms an attribute KeyValue into an OTLP key-value. The
// attribute value is converted to an OTLP AnyValue by Value.
func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
	return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
}
|
||||||
|
|
||||||
|
// Value transforms an attribute Value into an OTLP AnyValue.
|
||||||
|
func Value(v attribute.Value) *cpb.AnyValue {
|
||||||
|
av := new(cpb.AnyValue)
|
||||||
|
switch v.Type() {
|
||||||
|
case attribute.BOOL:
|
||||||
|
av.Value = &cpb.AnyValue_BoolValue{
|
||||||
|
BoolValue: v.AsBool(),
|
||||||
|
}
|
||||||
|
case attribute.BOOLSLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: boolSliceValues(v.AsBoolSlice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.INT64:
|
||||||
|
av.Value = &cpb.AnyValue_IntValue{
|
||||||
|
IntValue: v.AsInt64(),
|
||||||
|
}
|
||||||
|
case attribute.INT64SLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: int64SliceValues(v.AsInt64Slice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.FLOAT64:
|
||||||
|
av.Value = &cpb.AnyValue_DoubleValue{
|
||||||
|
DoubleValue: v.AsFloat64(),
|
||||||
|
}
|
||||||
|
case attribute.FLOAT64SLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: float64SliceValues(v.AsFloat64Slice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.STRING:
|
||||||
|
av.Value = &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: v.AsString(),
|
||||||
|
}
|
||||||
|
case attribute.STRINGSLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: stringSliceValues(v.AsStringSlice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
av.Value = &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: "INVALID",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return av
|
||||||
|
}
|
||||||
|
|
||||||
|
func boolSliceValues(vals []bool) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_BoolValue{
|
||||||
|
BoolValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func int64SliceValues(vals []int64) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_IntValue{
|
||||||
|
IntValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func float64SliceValues(vals []float64) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_DoubleValue{
|
||||||
|
DoubleValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringSliceValues(vals []string) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
114
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
generated
vendored
Normal file
114
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
generated
vendored
Normal file
|
@ -0,0 +1,114 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sentinel errors wrapped by errMetric when a metric carries an aggregation
// or temporality kind the transform does not recognize.
var (
	errUnknownAggregation = errors.New("unknown aggregation")
	errUnknownTemporality = errors.New("unknown temporality")
)
|
||||||
|
|
||||||
|
type errMetric struct {
|
||||||
|
m *mpb.Metric
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e errMetric) Unwrap() error {
|
||||||
|
return e.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e errMetric) Error() string {
|
||||||
|
format := "invalid metric (name: %q, description: %q, unit: %q): %s"
|
||||||
|
return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e errMetric) Is(target error) bool {
|
||||||
|
return errors.Is(e.err, target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// multiErr is used by the data-type transform functions to wrap multiple
|
||||||
|
// errors into a single return value. The error message will show all errors
|
||||||
|
// as a list and scope them by the datatype name that is returning them.
|
||||||
|
type multiErr struct {
|
||||||
|
datatype string
|
||||||
|
errs []error
|
||||||
|
}
|
||||||
|
|
||||||
|
// errOrNil returns nil if e contains no errors, otherwise it returns e.
|
||||||
|
func (e *multiErr) errOrNil() error {
|
||||||
|
if len(e.errs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// append adds err to e. If err is a multiErr, its errs are flattened into e.
|
||||||
|
func (e *multiErr) append(err error) {
|
||||||
|
// Do not use errors.As here, this should only be flattened one layer. If
|
||||||
|
// there is a *multiErr several steps down the chain, all the errors above
|
||||||
|
// it will be discarded if errors.As is used instead.
|
||||||
|
switch other := err.(type) {
|
||||||
|
case *multiErr:
|
||||||
|
// Flatten err errors into e.
|
||||||
|
e.errs = append(e.errs, other.errs...)
|
||||||
|
default:
|
||||||
|
e.errs = append(e.errs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *multiErr) Error() string {
|
||||||
|
es := make([]string, len(e.errs))
|
||||||
|
for i, err := range e.errs {
|
||||||
|
es[i] = fmt.Sprintf("* %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
format := "%d errors occurred transforming %s:\n\t%s"
|
||||||
|
return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *multiErr) Unwrap() error {
|
||||||
|
switch len(e.errs) {
|
||||||
|
case 0:
|
||||||
|
return nil
|
||||||
|
case 1:
|
||||||
|
return e.errs[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return a multiErr without the leading error.
|
||||||
|
cp := &multiErr{
|
||||||
|
datatype: e.datatype,
|
||||||
|
errs: make([]error, len(e.errs)-1),
|
||||||
|
}
|
||||||
|
copy(cp.errs, e.errs[1:])
|
||||||
|
return cp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *multiErr) Is(target error) bool {
|
||||||
|
if len(e.errs) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Check if the first error is target.
|
||||||
|
return errors.Is(e.errs[0], target)
|
||||||
|
}
|
292
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
generated
vendored
Normal file
292
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
generated
vendored
Normal file
|
@ -0,0 +1,292 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package transform provides transformation functionality from the
|
||||||
|
// sdk/metric/metricdata data-types into OTLP data-types.
|
||||||
|
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
|
||||||
|
// contains invalid ScopeMetrics, an error will be returned along with an OTLP
|
||||||
|
// ResourceMetrics that contains partial OTLP ScopeMetrics.
|
||||||
|
func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
|
||||||
|
sms, err := ScopeMetrics(rm.ScopeMetrics)
|
||||||
|
return &mpb.ResourceMetrics{
|
||||||
|
Resource: &rpb.Resource{
|
||||||
|
Attributes: AttrIter(rm.Resource.Iter()),
|
||||||
|
},
|
||||||
|
ScopeMetrics: sms,
|
||||||
|
SchemaUrl: rm.Resource.SchemaURL(),
|
||||||
|
}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
|
||||||
|
// sms contains invalid metric values, an error will be returned along with a
|
||||||
|
// slice that contains partial OTLP ScopeMetrics.
|
||||||
|
func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
|
||||||
|
errs := &multiErr{datatype: "ScopeMetrics"}
|
||||||
|
out := make([]*mpb.ScopeMetrics, 0, len(sms))
|
||||||
|
for _, sm := range sms {
|
||||||
|
ms, err := Metrics(sm.Metrics)
|
||||||
|
if err != nil {
|
||||||
|
errs.append(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
out = append(out, &mpb.ScopeMetrics{
|
||||||
|
Scope: &cpb.InstrumentationScope{
|
||||||
|
Name: sm.Scope.Name,
|
||||||
|
Version: sm.Scope.Version,
|
||||||
|
},
|
||||||
|
Metrics: ms,
|
||||||
|
SchemaUrl: sm.Scope.SchemaURL,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return out, errs.errOrNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
|
||||||
|
// invalid metric values, an error will be returned along with a slice that
|
||||||
|
// contains partial OTLP Metrics.
|
||||||
|
func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
|
||||||
|
errs := &multiErr{datatype: "Metrics"}
|
||||||
|
out := make([]*mpb.Metric, 0, len(ms))
|
||||||
|
for _, m := range ms {
|
||||||
|
o, err := metric(m)
|
||||||
|
if err != nil {
|
||||||
|
// Do not include invalid data. Drop the metric, report the error.
|
||||||
|
errs.append(errMetric{m: o, err: err})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out = append(out, o)
|
||||||
|
}
|
||||||
|
return out, errs.errOrNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
func metric(m metricdata.Metrics) (*mpb.Metric, error) {
|
||||||
|
var err error
|
||||||
|
out := &mpb.Metric{
|
||||||
|
Name: m.Name,
|
||||||
|
Description: m.Description,
|
||||||
|
Unit: string(m.Unit),
|
||||||
|
}
|
||||||
|
switch a := m.Data.(type) {
|
||||||
|
case metricdata.Gauge[int64]:
|
||||||
|
out.Data = Gauge[int64](a)
|
||||||
|
case metricdata.Gauge[float64]:
|
||||||
|
out.Data = Gauge[float64](a)
|
||||||
|
case metricdata.Sum[int64]:
|
||||||
|
out.Data, err = Sum[int64](a)
|
||||||
|
case metricdata.Sum[float64]:
|
||||||
|
out.Data, err = Sum[float64](a)
|
||||||
|
case metricdata.Histogram[int64]:
|
||||||
|
out.Data, err = Histogram(a)
|
||||||
|
case metricdata.Histogram[float64]:
|
||||||
|
out.Data, err = Histogram(a)
|
||||||
|
case metricdata.ExponentialHistogram[int64]:
|
||||||
|
out.Data, err = ExponentialHistogram(a)
|
||||||
|
case metricdata.ExponentialHistogram[float64]:
|
||||||
|
out.Data, err = ExponentialHistogram(a)
|
||||||
|
default:
|
||||||
|
return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
|
||||||
|
}
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gauge returns an OTLP Metric_Gauge generated from g.
|
||||||
|
func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
|
||||||
|
return &mpb.Metric_Gauge{
|
||||||
|
Gauge: &mpb.Gauge{
|
||||||
|
DataPoints: DataPoints(g.DataPoints),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum returns an OTLP Metric_Sum generated from s. An error is returned
|
||||||
|
// if the temporality of s is unknown.
|
||||||
|
func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
|
||||||
|
t, err := Temporality(s.Temporality)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mpb.Metric_Sum{
|
||||||
|
Sum: &mpb.Sum{
|
||||||
|
AggregationTemporality: t,
|
||||||
|
IsMonotonic: s.IsMonotonic,
|
||||||
|
DataPoints: DataPoints(s.DataPoints),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
|
||||||
|
func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
|
||||||
|
out := make([]*mpb.NumberDataPoint, 0, len(dPts))
|
||||||
|
for _, dPt := range dPts {
|
||||||
|
ndp := &mpb.NumberDataPoint{
|
||||||
|
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||||
|
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||||
|
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||||
|
}
|
||||||
|
switch v := any(dPt.Value).(type) {
|
||||||
|
case int64:
|
||||||
|
ndp.Value = &mpb.NumberDataPoint_AsInt{
|
||||||
|
AsInt: v,
|
||||||
|
}
|
||||||
|
case float64:
|
||||||
|
ndp.Value = &mpb.NumberDataPoint_AsDouble{
|
||||||
|
AsDouble: v,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out = append(out, ndp)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// Histogram returns an OTLP Metric_Histogram generated from h. An error is
|
||||||
|
// returned if the temporality of h is unknown.
|
||||||
|
func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
|
||||||
|
t, err := Temporality(h.Temporality)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mpb.Metric_Histogram{
|
||||||
|
Histogram: &mpb.Histogram{
|
||||||
|
AggregationTemporality: t,
|
||||||
|
DataPoints: HistogramDataPoints(h.DataPoints),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
|
||||||
|
// from dPts.
|
||||||
|
func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
|
||||||
|
out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
|
||||||
|
for _, dPt := range dPts {
|
||||||
|
sum := float64(dPt.Sum)
|
||||||
|
hdp := &mpb.HistogramDataPoint{
|
||||||
|
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||||
|
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||||
|
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||||
|
Count: dPt.Count,
|
||||||
|
Sum: &sum,
|
||||||
|
BucketCounts: dPt.BucketCounts,
|
||||||
|
ExplicitBounds: dPt.Bounds,
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Min.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
hdp.Min = &vF64
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Max.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
hdp.Max = &vF64
|
||||||
|
}
|
||||||
|
out = append(out, hdp)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
|
||||||
|
// returned if the temporality of h is unknown.
|
||||||
|
func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
|
||||||
|
t, err := Temporality(h.Temporality)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mpb.Metric_ExponentialHistogram{
|
||||||
|
ExponentialHistogram: &mpb.ExponentialHistogram{
|
||||||
|
AggregationTemporality: t,
|
||||||
|
DataPoints: ExponentialHistogramDataPoints(h.DataPoints),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
|
||||||
|
// from dPts.
|
||||||
|
func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
|
||||||
|
out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
|
||||||
|
for _, dPt := range dPts {
|
||||||
|
sum := float64(dPt.Sum)
|
||||||
|
ehdp := &mpb.ExponentialHistogramDataPoint{
|
||||||
|
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||||
|
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||||
|
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||||
|
Count: dPt.Count,
|
||||||
|
Sum: &sum,
|
||||||
|
Scale: dPt.Scale,
|
||||||
|
ZeroCount: dPt.ZeroCount,
|
||||||
|
|
||||||
|
Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
|
||||||
|
Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Min.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
ehdp.Min = &vF64
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Max.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
ehdp.Max = &vF64
|
||||||
|
}
|
||||||
|
out = append(out, ehdp)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
|
||||||
|
// from bucket.
|
||||||
|
func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
|
||||||
|
return &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||||
|
Offset: bucket.Offset,
|
||||||
|
BucketCounts: bucket.Counts,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Temporality returns an OTLP AggregationTemporality generated from t. If t
|
||||||
|
// is unknown, an error is returned along with the invalid
|
||||||
|
// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
|
||||||
|
func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
|
||||||
|
switch t {
|
||||||
|
case metricdata.DeltaTemporality:
|
||||||
|
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
|
||||||
|
case metricdata.CumulativeTemporality:
|
||||||
|
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
|
||||||
|
default:
|
||||||
|
err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
|
||||||
|
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
|
||||||
|
// since January 1, 1970 UTC as uint64.
|
||||||
|
// The result is undefined if the Unix time
|
||||||
|
// in nanoseconds cannot be represented by an int64
|
||||||
|
// (a date before the year 1678 or after 2262).
|
||||||
|
// timeUnixNano on the zero Time returns 0.
|
||||||
|
// The result does not depend on the location associated with t.
|
||||||
|
func timeUnixNano(t time.Time) uint64 {
|
||||||
|
if t.IsZero() {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return uint64(t.UnixNano())
|
||||||
|
}
|
20
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
generated
vendored
Normal file
20
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||||
|
|
||||||
|
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
|
||||||
|
func Version() string {
|
||||||
|
return "0.44.0"
|
||||||
|
}
|
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
54
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
generated
vendored
Normal file
54
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
generated
vendored
Normal file
|
@ -0,0 +1,54 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client manages connections to the collector, handles the
// transformation of data into wire format, and the transmission of that
// data to the collector.
//
// Implementations are supplied to the Exporter, which drives the
// Start/UploadTraces/Stop lifecycle described on each method.
type Client interface {
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// Start should establish connection(s) to endpoint(s). It is
	// called just once by the exporter, so the implementation
	// does not need to worry about idempotence and locking.
	Start(ctx context.Context) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// Stop should close the connections. The function is called
	// only once by the exporter, so the implementation does not
	// need to worry about idempotence, but it may be called
	// concurrently with UploadTraces, so proper
	// locking is required. The function serves as a
	// synchronization point - after the function returns, the
	// process of closing connections is assumed to be finished.
	Stop(ctx context.Context) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// UploadTraces should transform the passed traces to the wire
	// format and send it to the collector. May be called
	// concurrently.
	UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.
}
|
|
@ -0,0 +1,21 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package otlptrace contains abstractions for OTLP span exporters.
|
||||||
|
See the official OTLP span exporter implementations:
|
||||||
|
- [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc],
|
||||||
|
- [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp].
|
||||||
|
*/
|
||||||
|
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
116
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
generated
vendored
Normal file
116
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
generated
vendored
Normal file
|
@ -0,0 +1,116 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
|
||||||
|
tracesdk "go.opentelemetry.io/otel/sdk/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
// errAlreadyStarted is the sentinel returned by Exporter.Start when it is
// invoked more than once.
var errAlreadyStarted = errors.New("already started")

// Exporter exports trace data in the OTLP wire format.
type Exporter struct {
	client Client // transport used to deliver spans to the collector

	mu      sync.RWMutex // guards started
	started bool         // set by Start, cleared by Shutdown

	startOnce sync.Once // ensures Start's body runs at most once
	stopOnce  sync.Once // ensures Shutdown's body runs at most once
}
|
||||||
|
|
||||||
|
// ExportSpans exports a batch of spans.
|
||||||
|
func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
|
||||||
|
protoSpans := tracetransform.Spans(ss)
|
||||||
|
if len(protoSpans) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err := e.client.UploadTraces(ctx, protoSpans)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("traces export: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start establishes a connection to the receiving endpoint.
|
||||||
|
func (e *Exporter) Start(ctx context.Context) error {
|
||||||
|
err := errAlreadyStarted
|
||||||
|
e.startOnce.Do(func() {
|
||||||
|
e.mu.Lock()
|
||||||
|
e.started = true
|
||||||
|
e.mu.Unlock()
|
||||||
|
err = e.client.Start(ctx)
|
||||||
|
})
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown flushes all exports and closes all connections to the receiving endpoint.
|
||||||
|
func (e *Exporter) Shutdown(ctx context.Context) error {
|
||||||
|
e.mu.RLock()
|
||||||
|
started := e.started
|
||||||
|
e.mu.RUnlock()
|
||||||
|
|
||||||
|
if !started {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
e.stopOnce.Do(func() {
|
||||||
|
err = e.client.Stop(ctx)
|
||||||
|
e.mu.Lock()
|
||||||
|
e.started = false
|
||||||
|
e.mu.Unlock()
|
||||||
|
})
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ tracesdk.SpanExporter = (*Exporter)(nil)
|
||||||
|
|
||||||
|
// New constructs a new Exporter and starts it.
|
||||||
|
func New(ctx context.Context, client Client) (*Exporter, error) {
|
||||||
|
exp := NewUnstarted(client)
|
||||||
|
if err := exp.Start(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return exp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewUnstarted constructs a new Exporter and does not start it.
|
||||||
|
func NewUnstarted(client Client) *Exporter {
|
||||||
|
return &Exporter{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalLog is the marshaling function used by the logging system to represent this exporter.
|
||||||
|
func (e *Exporter) MarshalLog() interface{} {
|
||||||
|
return struct {
|
||||||
|
Type string
|
||||||
|
Client Client
|
||||||
|
}{
|
||||||
|
Type: "otlptrace",
|
||||||
|
Client: e.client,
|
||||||
|
}
|
||||||
|
}
|
158
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
generated
vendored
Normal file
158
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
generated
vendored
Normal file
|
@ -0,0 +1,158 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
|
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
|
||||||
|
func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
|
||||||
|
if len(attrs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*commonpb.KeyValue, 0, len(attrs))
|
||||||
|
for _, kv := range attrs {
|
||||||
|
out = append(out, KeyValue(kv))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterator transforms an attribute iterator into OTLP key-values.
|
||||||
|
func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
|
||||||
|
l := iter.Len()
|
||||||
|
if l == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*commonpb.KeyValue, 0, l)
|
||||||
|
for iter.Next() {
|
||||||
|
out = append(out, KeyValue(iter.Attribute()))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResourceAttributes transforms a Resource into a slice of OTLP key-values.
func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
	return Iterator(res.Iter())
}
|
||||||
|
|
||||||
|
// KeyValue transforms an attribute KeyValue into an OTLP key-value.
|
||||||
|
func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
|
||||||
|
return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value transforms an attribute Value into an OTLP AnyValue.
|
||||||
|
func Value(v attribute.Value) *commonpb.AnyValue {
|
||||||
|
av := new(commonpb.AnyValue)
|
||||||
|
switch v.Type() {
|
||||||
|
case attribute.BOOL:
|
||||||
|
av.Value = &commonpb.AnyValue_BoolValue{
|
||||||
|
BoolValue: v.AsBool(),
|
||||||
|
}
|
||||||
|
case attribute.BOOLSLICE:
|
||||||
|
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &commonpb.ArrayValue{
|
||||||
|
Values: boolSliceValues(v.AsBoolSlice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.INT64:
|
||||||
|
av.Value = &commonpb.AnyValue_IntValue{
|
||||||
|
IntValue: v.AsInt64(),
|
||||||
|
}
|
||||||
|
case attribute.INT64SLICE:
|
||||||
|
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &commonpb.ArrayValue{
|
||||||
|
Values: int64SliceValues(v.AsInt64Slice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.FLOAT64:
|
||||||
|
av.Value = &commonpb.AnyValue_DoubleValue{
|
||||||
|
DoubleValue: v.AsFloat64(),
|
||||||
|
}
|
||||||
|
case attribute.FLOAT64SLICE:
|
||||||
|
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &commonpb.ArrayValue{
|
||||||
|
Values: float64SliceValues(v.AsFloat64Slice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.STRING:
|
||||||
|
av.Value = &commonpb.AnyValue_StringValue{
|
||||||
|
StringValue: v.AsString(),
|
||||||
|
}
|
||||||
|
case attribute.STRINGSLICE:
|
||||||
|
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &commonpb.ArrayValue{
|
||||||
|
Values: stringSliceValues(v.AsStringSlice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
av.Value = &commonpb.AnyValue_StringValue{
|
||||||
|
StringValue: "INVALID",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return av
|
||||||
|
}
|
||||||
|
|
||||||
|
func boolSliceValues(vals []bool) []*commonpb.AnyValue {
|
||||||
|
converted := make([]*commonpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &commonpb.AnyValue{
|
||||||
|
Value: &commonpb.AnyValue_BoolValue{
|
||||||
|
BoolValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func int64SliceValues(vals []int64) []*commonpb.AnyValue {
|
||||||
|
converted := make([]*commonpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &commonpb.AnyValue{
|
||||||
|
Value: &commonpb.AnyValue_IntValue{
|
||||||
|
IntValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func float64SliceValues(vals []float64) []*commonpb.AnyValue {
|
||||||
|
converted := make([]*commonpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &commonpb.AnyValue{
|
||||||
|
Value: &commonpb.AnyValue_DoubleValue{
|
||||||
|
DoubleValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringSliceValues(vals []string) []*commonpb.AnyValue {
|
||||||
|
converted := make([]*commonpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &commonpb.AnyValue{
|
||||||
|
Value: &commonpb.AnyValue_StringValue{
|
||||||
|
StringValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
30
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
generated
vendored
Normal file
30
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
|
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
|
||||||
|
if il == (instrumentation.Scope{}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &commonpb.InstrumentationScope{
|
||||||
|
Name: il.Name,
|
||||||
|
Version: il.Version,
|
||||||
|
}
|
||||||
|
}
|
28
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
generated
vendored
Normal file
28
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
|
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Resource transforms a Resource into an OTLP Resource.
|
||||||
|
func Resource(r *resource.Resource) *resourcepb.Resource {
|
||||||
|
if r == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
|
||||||
|
}
|
205
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
generated
vendored
Normal file
205
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
generated
vendored
Normal file
|
@ -0,0 +1,205 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/codes"
|
||||||
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
|
tracesdk "go.opentelemetry.io/otel/sdk/trace"
|
||||||
|
"go.opentelemetry.io/otel/trace"
|
||||||
|
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
// ResourceSpans.
//
// Spans are grouped on two levels: first by resource, then by
// instrumentation scope within each resource. Nil spans are skipped.
func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
	if len(sdl) == 0 {
		return nil
	}

	// rsm groups output by resource identity.
	rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)

	// key identifies a (resource, instrumentation scope) pair.
	type key struct {
		r  attribute.Distinct
		is instrumentation.Scope
	}
	ssm := make(map[key]*tracepb.ScopeSpans)

	var resources int
	for _, sd := range sdl {
		if sd == nil {
			continue
		}

		rKey := sd.Resource().Equivalent()
		k := key{
			r:  rKey,
			is: sd.InstrumentationScope(),
		}
		scopeSpan, iOk := ssm[k]
		if !iOk {
			// Either the resource or instrumentation scope were unknown.
			scopeSpan = &tracepb.ScopeSpans{
				Scope:     InstrumentationScope(sd.InstrumentationScope()),
				Spans:     []*tracepb.Span{},
				SchemaUrl: sd.InstrumentationScope().SchemaURL,
			}
		}
		scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
		ssm[k] = scopeSpan

		rs, rOk := rsm[rKey]
		if !rOk {
			resources++
			// The resource was unknown. The scopeSpan pointer stored here is
			// shared with ssm, so later appends to scopeSpan.Spans are
			// visible through this ResourceSpans as well.
			rs = &tracepb.ResourceSpans{
				Resource:   Resource(sd.Resource()),
				ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
				SchemaUrl:  sd.Resource().SchemaURL(),
			}
			rsm[rKey] = rs
			continue
		}

		// The resource has been seen before. Check if the instrumentation
		// library lookup was unknown because if so we need to add it to the
		// ResourceSpans. Otherwise, the instrumentation library has already
		// been seen and the append we did above will be included it in the
		// ScopeSpans reference.
		if !iOk {
			rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
		}
	}

	// Transform the categorized map into a slice
	rss := make([]*tracepb.ResourceSpans, 0, resources)
	for _, rs := range rsm {
		rss = append(rss, rs)
	}
	return rss
}
|
||||||
|
|
||||||
|
// span transforms a Span into an OTLP span.
|
||||||
|
func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
|
||||||
|
if sd == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
tid := sd.SpanContext().TraceID()
|
||||||
|
sid := sd.SpanContext().SpanID()
|
||||||
|
|
||||||
|
s := &tracepb.Span{
|
||||||
|
TraceId: tid[:],
|
||||||
|
SpanId: sid[:],
|
||||||
|
TraceState: sd.SpanContext().TraceState().String(),
|
||||||
|
Status: status(sd.Status().Code, sd.Status().Description),
|
||||||
|
StartTimeUnixNano: uint64(sd.StartTime().UnixNano()),
|
||||||
|
EndTimeUnixNano: uint64(sd.EndTime().UnixNano()),
|
||||||
|
Links: links(sd.Links()),
|
||||||
|
Kind: spanKind(sd.SpanKind()),
|
||||||
|
Name: sd.Name(),
|
||||||
|
Attributes: KeyValues(sd.Attributes()),
|
||||||
|
Events: spanEvents(sd.Events()),
|
||||||
|
DroppedAttributesCount: uint32(sd.DroppedAttributes()),
|
||||||
|
DroppedEventsCount: uint32(sd.DroppedEvents()),
|
||||||
|
DroppedLinksCount: uint32(sd.DroppedLinks()),
|
||||||
|
}
|
||||||
|
|
||||||
|
if psid := sd.Parent().SpanID(); psid.IsValid() {
|
||||||
|
s.ParentSpanId = psid[:]
|
||||||
|
}
|
||||||
|
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// status transform a span code and message into an OTLP span status.
|
||||||
|
func status(status codes.Code, message string) *tracepb.Status {
|
||||||
|
var c tracepb.Status_StatusCode
|
||||||
|
switch status {
|
||||||
|
case codes.Ok:
|
||||||
|
c = tracepb.Status_STATUS_CODE_OK
|
||||||
|
case codes.Error:
|
||||||
|
c = tracepb.Status_STATUS_CODE_ERROR
|
||||||
|
default:
|
||||||
|
c = tracepb.Status_STATUS_CODE_UNSET
|
||||||
|
}
|
||||||
|
return &tracepb.Status{
|
||||||
|
Code: c,
|
||||||
|
Message: message,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// links transforms span Links to OTLP span links.
|
||||||
|
func links(links []tracesdk.Link) []*tracepb.Span_Link {
|
||||||
|
if len(links) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sl := make([]*tracepb.Span_Link, 0, len(links))
|
||||||
|
for _, otLink := range links {
|
||||||
|
// This redefinition is necessary to prevent otLink.*ID[:] copies
|
||||||
|
// being reused -- in short we need a new otLink per iteration.
|
||||||
|
otLink := otLink
|
||||||
|
|
||||||
|
tid := otLink.SpanContext.TraceID()
|
||||||
|
sid := otLink.SpanContext.SpanID()
|
||||||
|
|
||||||
|
sl = append(sl, &tracepb.Span_Link{
|
||||||
|
TraceId: tid[:],
|
||||||
|
SpanId: sid[:],
|
||||||
|
Attributes: KeyValues(otLink.Attributes),
|
||||||
|
DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return sl
|
||||||
|
}
|
||||||
|
|
||||||
|
// spanEvents transforms span Events to an OTLP span events.
|
||||||
|
func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
|
||||||
|
if len(es) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
events := make([]*tracepb.Span_Event, len(es))
|
||||||
|
// Transform message events
|
||||||
|
for i := 0; i < len(es); i++ {
|
||||||
|
events[i] = &tracepb.Span_Event{
|
||||||
|
Name: es[i].Name,
|
||||||
|
TimeUnixNano: uint64(es[i].Time.UnixNano()),
|
||||||
|
Attributes: KeyValues(es[i].Attributes),
|
||||||
|
DroppedAttributesCount: uint32(es[i].DroppedAttributeCount),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return events
|
||||||
|
}
|
||||||
|
|
||||||
|
// spanKind transforms a SpanKind to an OTLP span kind.
|
||||||
|
func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind {
|
||||||
|
switch kind {
|
||||||
|
case trace.SpanKindInternal:
|
||||||
|
return tracepb.Span_SPAN_KIND_INTERNAL
|
||||||
|
case trace.SpanKindClient:
|
||||||
|
return tracepb.Span_SPAN_KIND_CLIENT
|
||||||
|
case trace.SpanKindServer:
|
||||||
|
return tracepb.Span_SPAN_KIND_SERVER
|
||||||
|
case trace.SpanKindProducer:
|
||||||
|
return tracepb.Span_SPAN_KIND_PRODUCER
|
||||||
|
case trace.SpanKindConsumer:
|
||||||
|
return tracepb.Span_SPAN_KIND_CONSUMER
|
||||||
|
default:
|
||||||
|
return tracepb.Span_SPAN_KIND_UNSPECIFIED
|
||||||
|
}
|
||||||
|
}
|
201
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
generated
vendored
Normal file
201
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
306
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
generated
vendored
Normal file
306
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
generated
vendored
Normal file
|
@ -0,0 +1,306 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
|
||||||
|
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
|
||||||
|
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type client struct {
|
||||||
|
endpoint string
|
||||||
|
dialOpts []grpc.DialOption
|
||||||
|
metadata metadata.MD
|
||||||
|
exportTimeout time.Duration
|
||||||
|
requestFunc retry.RequestFunc
|
||||||
|
|
||||||
|
// stopCtx is used as a parent context for all exports. Therefore, when it
|
||||||
|
// is canceled with the stopFunc all exports are canceled.
|
||||||
|
stopCtx context.Context
|
||||||
|
// stopFunc cancels stopCtx, stopping any active exports.
|
||||||
|
stopFunc context.CancelFunc
|
||||||
|
|
||||||
|
// ourConn keeps track of where conn was created: true if created here on
|
||||||
|
// Start, or false if passed with an option. This is important on Shutdown
|
||||||
|
// as the conn should only be closed if created here on start. Otherwise,
|
||||||
|
// it is up to the processes that passed the conn to close it.
|
||||||
|
ourConn bool
|
||||||
|
conn *grpc.ClientConn
|
||||||
|
tscMu sync.RWMutex
|
||||||
|
tsc coltracepb.TraceServiceClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile time check *client implements otlptrace.Client.
|
||||||
|
var _ otlptrace.Client = (*client)(nil)
|
||||||
|
|
||||||
|
// NewClient creates a new gRPC trace client.
|
||||||
|
func NewClient(opts ...Option) otlptrace.Client {
|
||||||
|
return newClient(opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newClient(opts ...Option) *client {
|
||||||
|
cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
c := &client{
|
||||||
|
endpoint: cfg.Traces.Endpoint,
|
||||||
|
exportTimeout: cfg.Traces.Timeout,
|
||||||
|
requestFunc: cfg.RetryConfig.RequestFunc(retryable),
|
||||||
|
dialOpts: cfg.DialOptions,
|
||||||
|
stopCtx: ctx,
|
||||||
|
stopFunc: cancel,
|
||||||
|
conn: cfg.GRPCConn,
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cfg.Traces.Headers) > 0 {
|
||||||
|
c.metadata = metadata.New(cfg.Traces.Headers)
|
||||||
|
}
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start establishes a gRPC connection to the collector.
|
||||||
|
func (c *client) Start(ctx context.Context) error {
|
||||||
|
if c.conn == nil {
|
||||||
|
// If the caller did not provide a ClientConn when the client was
|
||||||
|
// created, create one using the configuration they did provide.
|
||||||
|
conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Keep track that we own the lifecycle of this conn and need to close
|
||||||
|
// it on Shutdown.
|
||||||
|
c.ourConn = true
|
||||||
|
c.conn = conn
|
||||||
|
}
|
||||||
|
|
||||||
|
// The otlptrace.Client interface states this method is called just once,
|
||||||
|
// so no need to check if already started.
|
||||||
|
c.tscMu.Lock()
|
||||||
|
c.tsc = coltracepb.NewTraceServiceClient(c.conn)
|
||||||
|
c.tscMu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var errAlreadyStopped = errors.New("the client is already stopped")
|
||||||
|
|
||||||
|
// Stop shuts down the client.
|
||||||
|
//
|
||||||
|
// Any active connections to a remote endpoint are closed if they were created
|
||||||
|
// by the client. Any gRPC connection passed during creation using
|
||||||
|
// WithGRPCConn will not be closed. It is the caller's responsibility to
|
||||||
|
// handle cleanup of that resource.
|
||||||
|
//
|
||||||
|
// This method synchronizes with the UploadTraces method of the client. It
|
||||||
|
// will wait for any active calls to that method to complete unimpeded, or it
|
||||||
|
// will cancel any active calls if ctx expires. If ctx expires, the context
|
||||||
|
// error will be forwarded as the returned error. All client held resources
|
||||||
|
// will still be released in this situation.
|
||||||
|
//
|
||||||
|
// If the client has already stopped, an error will be returned describing
|
||||||
|
// this.
|
||||||
|
func (c *client) Stop(ctx context.Context) error {
|
||||||
|
// Make sure to return context error if the context is done when calling this method.
|
||||||
|
err := ctx.Err()
|
||||||
|
|
||||||
|
// Acquire the c.tscMu lock within the ctx lifetime.
|
||||||
|
acquired := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
c.tscMu.Lock()
|
||||||
|
close(acquired)
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// The Stop timeout is reached. Kill any remaining exports to force
|
||||||
|
// the clear of the lock and save the timeout error to return and
|
||||||
|
// signal the shutdown timed out before cleanly stopping.
|
||||||
|
c.stopFunc()
|
||||||
|
err = ctx.Err()
|
||||||
|
|
||||||
|
// To ensure the client is not left in a dirty state c.tsc needs to be
|
||||||
|
// set to nil. To avoid the race condition when doing this, ensure
|
||||||
|
// that all the exports are killed (initiated by c.stopFunc).
|
||||||
|
<-acquired
|
||||||
|
case <-acquired:
|
||||||
|
}
|
||||||
|
// Hold the tscMu lock for the rest of the function to ensure no new
|
||||||
|
// exports are started.
|
||||||
|
defer c.tscMu.Unlock()
|
||||||
|
|
||||||
|
// The otlptrace.Client interface states this method is called only
|
||||||
|
// once, but there is no guarantee it is called after Start. Ensure the
|
||||||
|
// client is started before doing anything and let the called know if they
|
||||||
|
// made a mistake.
|
||||||
|
if c.tsc == nil {
|
||||||
|
return errAlreadyStopped
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear c.tsc to signal the client is stopped.
|
||||||
|
c.tsc = nil
|
||||||
|
|
||||||
|
if c.ourConn {
|
||||||
|
closeErr := c.conn.Close()
|
||||||
|
// A context timeout error takes precedence over this error.
|
||||||
|
if err == nil && closeErr != nil {
|
||||||
|
err = closeErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var errShutdown = errors.New("the client is shutdown")
|
||||||
|
|
||||||
|
// UploadTraces sends a batch of spans.
|
||||||
|
//
|
||||||
|
// Retryable errors from the server will be handled according to any
|
||||||
|
// RetryConfig the client was created with.
|
||||||
|
func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
|
||||||
|
// Hold a read lock to ensure a shut down initiated after this starts does
|
||||||
|
// not abandon the export. This read lock acquire has less priority than a
|
||||||
|
// write lock acquire (i.e. Stop), meaning if the client is shutting down
|
||||||
|
// this will come after the shut down.
|
||||||
|
c.tscMu.RLock()
|
||||||
|
defer c.tscMu.RUnlock()
|
||||||
|
|
||||||
|
if c.tsc == nil {
|
||||||
|
return errShutdown
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := c.exportContext(ctx)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
return c.requestFunc(ctx, func(iCtx context.Context) error {
|
||||||
|
resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
|
||||||
|
ResourceSpans: protoSpans,
|
||||||
|
})
|
||||||
|
if resp != nil && resp.PartialSuccess != nil {
|
||||||
|
msg := resp.PartialSuccess.GetErrorMessage()
|
||||||
|
n := resp.PartialSuccess.GetRejectedSpans()
|
||||||
|
if n != 0 || msg != "" {
|
||||||
|
err := internal.TracePartialSuccessError(n, msg)
|
||||||
|
otel.Handle(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// nil is converted to OK.
|
||||||
|
if status.Code(err) == codes.OK {
|
||||||
|
// Success.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// exportContext returns a copy of parent with an appropriate deadline and
|
||||||
|
// cancellation function.
|
||||||
|
//
|
||||||
|
// It is the callers responsibility to cancel the returned context once its
|
||||||
|
// use is complete, via the parent or directly with the returned CancelFunc, to
|
||||||
|
// ensure all resources are correctly released.
|
||||||
|
func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
|
||||||
|
var (
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
)
|
||||||
|
|
||||||
|
if c.exportTimeout > 0 {
|
||||||
|
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
|
||||||
|
} else {
|
||||||
|
ctx, cancel = context.WithCancel(parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.metadata.Len() > 0 {
|
||||||
|
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unify the client stopCtx with the parent.
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
case <-c.stopCtx.Done():
|
||||||
|
// Cancel the export as the shutdown has timed out.
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return ctx, cancel
|
||||||
|
}
|
||||||
|
|
||||||
|
// retryable returns if err identifies a request that can be retried and a
|
||||||
|
// duration to wait for if an explicit throttle time is included in err.
|
||||||
|
func retryable(err error) (bool, time.Duration) {
|
||||||
|
s := status.Convert(err)
|
||||||
|
return retryableGRPCStatus(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
|
||||||
|
switch s.Code() {
|
||||||
|
case codes.Canceled,
|
||||||
|
codes.DeadlineExceeded,
|
||||||
|
codes.Aborted,
|
||||||
|
codes.OutOfRange,
|
||||||
|
codes.Unavailable,
|
||||||
|
codes.DataLoss:
|
||||||
|
// Additionally handle RetryInfo.
|
||||||
|
_, d := throttleDelay(s)
|
||||||
|
return true, d
|
||||||
|
case codes.ResourceExhausted:
|
||||||
|
// Retry only if the server signals that the recovery from resource exhaustion is possible.
|
||||||
|
return throttleDelay(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not a retry-able error.
|
||||||
|
return false, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// throttleDelay returns of the status is RetryInfo
|
||||||
|
// and the its duration to wait for if an explicit throttle time.
|
||||||
|
func throttleDelay(s *status.Status) (bool, time.Duration) {
|
||||||
|
for _, detail := range s.Details() {
|
||||||
|
if t, ok := detail.(*errdetails.RetryInfo); ok {
|
||||||
|
return true, t.RetryDelay.AsDuration()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalLog is the marshaling function used by the logging system to represent this Client.
|
||||||
|
func (c *client) MarshalLog() interface{} {
|
||||||
|
return struct {
|
||||||
|
Type string
|
||||||
|
Endpoint string
|
||||||
|
}{
|
||||||
|
Type: "otlphttpgrpc",
|
||||||
|
Endpoint: c.endpoint,
|
||||||
|
}
|
||||||
|
}
|
77
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
generated
vendored
Normal file
77
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package otlptracegrpc provides an OTLP span exporter using gRPC.
|
||||||
|
By default the telemetry is sent to https://localhost:4317.
|
||||||
|
|
||||||
|
Exporter should be created using [New].
|
||||||
|
|
||||||
|
The environment variables described below can be used for configuration.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
|
||||||
|
target to which the exporter sends telemetry.
|
||||||
|
The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||||
|
The value must contain a host.
|
||||||
|
The value may additionally a port, a scheme, and a path.
|
||||||
|
The value accepts "http" and "https" scheme.
|
||||||
|
The value should not contain a query string or fragment.
|
||||||
|
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
|
||||||
|
The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
|
||||||
|
setting "true" disables client transport security for the exporter's gRPC connection.
|
||||||
|
You can use this only when an endpoint is provided without the http or https scheme.
|
||||||
|
OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT setting overrides
|
||||||
|
the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT.
|
||||||
|
OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
|
||||||
|
The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
|
||||||
|
key-value pairs used as gRPC metadata associated with gRPC requests.
|
||||||
|
The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format],
|
||||||
|
except that additional semi-colon delimited metadata is not supported.
|
||||||
|
Example value: "key1=value1,key2=value2".
|
||||||
|
OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
|
||||||
|
The configuration can be overridden by [WithHeaders] option.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
|
||||||
|
maximum time in milliseconds the OTLP exporter waits for each batch export.
|
||||||
|
OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
|
||||||
|
The configuration can be overridden by [WithTimeout] option.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
|
||||||
|
the gRPC compressor the exporter uses.
|
||||||
|
Supported value: "gzip".
|
||||||
|
OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
|
||||||
|
The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
|
||||||
|
the filepath to the trusted certificate to use when verifying a server's TLS credentials.
|
||||||
|
OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
|
||||||
|
The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
|
||||||
|
the filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format.
|
||||||
|
OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
|
||||||
|
The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
|
||||||
|
|
||||||
|
OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
|
||||||
|
the filepath to the clients private key to use in mTLS communication in PEM format.
|
||||||
|
OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
|
||||||
|
The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
|
||||||
|
|
||||||
|
[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
|
||||||
|
*/
|
||||||
|
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
31
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
generated
vendored
Normal file
31
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
|
)
|
||||||
|
|
||||||
|
// New constructs a new Exporter and starts it.
|
||||||
|
func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
|
||||||
|
return otlptrace.New(ctx, NewClient(opts...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewUnstarted constructs a new Exporter and does not start it.
|
||||||
|
func NewUnstarted(opts ...Option) *otlptrace.Exporter {
|
||||||
|
return otlptrace.NewUnstarted(NewClient(opts...))
|
||||||
|
}
|
202
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
generated
vendored
Normal file
202
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigFn is the generic function used to set a config.
|
||||||
|
type ConfigFn func(*EnvOptionsReader)
|
||||||
|
|
||||||
|
// EnvOptionsReader reads the required environment variables.
|
||||||
|
type EnvOptionsReader struct {
|
||||||
|
GetEnv func(string) string
|
||||||
|
ReadFile func(string) ([]byte, error)
|
||||||
|
Namespace string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply runs every ConfigFn.
|
||||||
|
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
|
||||||
|
for _, o := range opts {
|
||||||
|
o(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEnvValue gets an OTLP environment variable value of the specified key
|
||||||
|
// using the GetEnv function.
|
||||||
|
// This function prepends the OTLP specified namespace to all key lookups.
|
||||||
|
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
|
||||||
|
v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
|
||||||
|
return v, v != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithString retrieves the specified config and passes it to ConfigFn as a string.
|
||||||
|
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
fn(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
|
||||||
|
func WithBool(n string, fn func(bool)) ConfigFn {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
b := strings.ToLower(v) == "true"
|
||||||
|
fn(b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
|
||||||
|
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
d, err := strconv.Atoi(v)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "parse duration", "input", v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fn(time.Duration(d) * time.Millisecond)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
|
||||||
|
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
fn(stringToHeader(v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
|
||||||
|
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
u, err := url.Parse(v)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "parse url", "input", v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fn(u)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
|
||||||
|
func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
b, err := e.ReadFile(v)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "read tls ca cert file", "file", v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c, err := createCertPool(b)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "create tls cert pool")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fn(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn.
|
||||||
|
func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
vc, okc := e.GetEnvValue(nc)
|
||||||
|
vk, okk := e.GetEnvValue(nk)
|
||||||
|
if !okc || !okk {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
cert, err := e.ReadFile(vc)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "read tls client cert", "file", vc)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
key, err := e.ReadFile(vk)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "read tls client key", "file", vk)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
crt, err := tls.X509KeyPair(cert, key)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "create tls client key pair")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fn(crt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func keyWithNamespace(ns, key string) string {
|
||||||
|
if ns == "" {
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s_%s", ns, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringToHeader(value string) map[string]string {
|
||||||
|
headersPairs := strings.Split(value, ",")
|
||||||
|
headers := make(map[string]string)
|
||||||
|
|
||||||
|
for _, header := range headersPairs {
|
||||||
|
n, v, found := strings.Cut(header, "=")
|
||||||
|
if !found {
|
||||||
|
global.Error(errors.New("missing '="), "parse headers", "input", header)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name, err := url.PathUnescape(n)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "escape header key", "key", n)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
trimmedName := strings.TrimSpace(name)
|
||||||
|
value, err := url.PathUnescape(v)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "escape header value", "value", v)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
trimmedValue := strings.TrimSpace(value)
|
||||||
|
|
||||||
|
headers[trimmedName] = trimmedValue
|
||||||
|
}
|
||||||
|
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
|
func createCertPool(certBytes []byte) (*x509.CertPool, error) {
|
||||||
|
cp := x509.NewCertPool()
|
||||||
|
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
|
||||||
|
return nil, errors.New("failed to append certificate to the cert pool")
|
||||||
|
}
|
||||||
|
return cp, nil
|
||||||
|
}
|
35
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
generated
vendored
Normal file
35
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
|
||||||
|
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
|
153
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
generated
vendored
Normal file
153
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
generated
vendored
Normal file
|
@ -0,0 +1,153 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultEnvOptionsReader is the default environments reader.
|
||||||
|
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
|
||||||
|
GetEnv: os.Getenv,
|
||||||
|
ReadFile: os.ReadFile,
|
||||||
|
Namespace: "OTEL_EXPORTER_OTLP",
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
|
||||||
|
func ApplyGRPCEnvConfigs(cfg Config) Config {
|
||||||
|
opts := getOptionsFromEnv()
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyGRPCOption(cfg)
|
||||||
|
}
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
|
||||||
|
func ApplyHTTPEnvConfigs(cfg Config) Config {
|
||||||
|
opts := getOptionsFromEnv()
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyHTTPOption(cfg)
|
||||||
|
}
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOptionsFromEnv() []GenericOption {
|
||||||
|
opts := []GenericOption{}
|
||||||
|
|
||||||
|
tlsConf := &tls.Config{}
|
||||||
|
DefaultEnvOptionsReader.Apply(
|
||||||
|
envconfig.WithURL("ENDPOINT", func(u *url.URL) {
|
||||||
|
opts = append(opts, withEndpointScheme(u))
|
||||||
|
opts = append(opts, newSplitOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.Endpoint = u.Host
|
||||||
|
// For OTLP/HTTP endpoint URLs without a per-signal
|
||||||
|
// configuration, the passed endpoint is used as a base URL
|
||||||
|
// and the signals are sent to these paths relative to that.
|
||||||
|
cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
|
||||||
|
return cfg
|
||||||
|
}, withEndpointForGRPC(u)))
|
||||||
|
}),
|
||||||
|
envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
|
||||||
|
opts = append(opts, withEndpointScheme(u))
|
||||||
|
opts = append(opts, newSplitOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.Endpoint = u.Host
|
||||||
|
// For endpoint URLs for OTLP/HTTP per-signal variables, the
|
||||||
|
// URL MUST be used as-is without any modification. The only
|
||||||
|
// exception is that if an URL contains no path part, the root
|
||||||
|
// path / MUST be used.
|
||||||
|
path := u.Path
|
||||||
|
if path == "" {
|
||||||
|
path = "/"
|
||||||
|
}
|
||||||
|
cfg.Traces.URLPath = path
|
||||||
|
return cfg
|
||||||
|
}, withEndpointForGRPC(u)))
|
||||||
|
}),
|
||||||
|
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||||
|
envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||||
|
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||||
|
envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||||
|
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
|
||||||
|
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||||
|
envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||||
|
envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
|
||||||
|
envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
|
||||||
|
WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
|
||||||
|
WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
|
||||||
|
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||||
|
envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||||
|
)
|
||||||
|
|
||||||
|
return opts
|
||||||
|
}
|
||||||
|
|
||||||
|
func withEndpointScheme(u *url.URL) GenericOption {
|
||||||
|
switch strings.ToLower(u.Scheme) {
|
||||||
|
case "http", "unix":
|
||||||
|
return WithInsecure()
|
||||||
|
default:
|
||||||
|
return WithSecure()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
|
||||||
|
return func(cfg Config) Config {
|
||||||
|
// For OTLP/gRPC endpoints, this is the target to which the
|
||||||
|
// exporter is going to send telemetry.
|
||||||
|
cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
|
||||||
|
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
|
||||||
|
return func(e *envconfig.EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
cp := NoCompression
|
||||||
|
if v == "gzip" {
|
||||||
|
cp = GzipCompression
|
||||||
|
}
|
||||||
|
|
||||||
|
fn(cp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// revive:disable-next-line:flag-parameter
|
||||||
|
func withInsecure(b bool) GenericOption {
|
||||||
|
if b {
|
||||||
|
return WithInsecure()
|
||||||
|
}
|
||||||
|
return WithSecure()
|
||||||
|
}
|
||||||
|
|
||||||
|
func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
|
||||||
|
return func(e *envconfig.EnvOptionsReader) {
|
||||||
|
if c.RootCAs != nil || len(c.Certificates) > 0 {
|
||||||
|
fn(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
325
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
generated
vendored
Normal file
325
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
generated
vendored
Normal file
|
@ -0,0 +1,325 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/backoff"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
"google.golang.org/grpc/encoding/gzip"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultTracesPath is a default URL path for endpoint that
|
||||||
|
// receives spans.
|
||||||
|
DefaultTracesPath string = "/v1/traces"
|
||||||
|
// DefaultTimeout is a default max waiting time for the backend to process
|
||||||
|
// each span batch.
|
||||||
|
DefaultTimeout time.Duration = 10 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
SignalConfig struct {
|
||||||
|
Endpoint string
|
||||||
|
Insecure bool
|
||||||
|
TLSCfg *tls.Config
|
||||||
|
Headers map[string]string
|
||||||
|
Compression Compression
|
||||||
|
Timeout time.Duration
|
||||||
|
URLPath string
|
||||||
|
|
||||||
|
// gRPC configurations
|
||||||
|
GRPCCredentials credentials.TransportCredentials
|
||||||
|
}
|
||||||
|
|
||||||
|
Config struct {
|
||||||
|
// Signal specific configurations
|
||||||
|
Traces SignalConfig
|
||||||
|
|
||||||
|
RetryConfig retry.Config
|
||||||
|
|
||||||
|
// gRPC configurations
|
||||||
|
ReconnectionPeriod time.Duration
|
||||||
|
ServiceConfig string
|
||||||
|
DialOptions []grpc.DialOption
|
||||||
|
GRPCConn *grpc.ClientConn
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewHTTPConfig returns a new Config with all settings applied from opts and
|
||||||
|
// any unset setting using the default HTTP config values.
|
||||||
|
func NewHTTPConfig(opts ...HTTPOption) Config {
|
||||||
|
cfg := Config{
|
||||||
|
Traces: SignalConfig{
|
||||||
|
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
|
||||||
|
URLPath: DefaultTracesPath,
|
||||||
|
Compression: NoCompression,
|
||||||
|
Timeout: DefaultTimeout,
|
||||||
|
},
|
||||||
|
RetryConfig: retry.DefaultConfig,
|
||||||
|
}
|
||||||
|
cfg = ApplyHTTPEnvConfigs(cfg)
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyHTTPOption(cfg)
|
||||||
|
}
|
||||||
|
cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanPath returns a path with all spaces trimmed and all redundancies
|
||||||
|
// removed. If urlPath is empty or cleaning it results in an empty string,
|
||||||
|
// defaultPath is returned instead.
|
||||||
|
func cleanPath(urlPath string, defaultPath string) string {
|
||||||
|
tmp := path.Clean(strings.TrimSpace(urlPath))
|
||||||
|
if tmp == "." {
|
||||||
|
return defaultPath
|
||||||
|
}
|
||||||
|
if !path.IsAbs(tmp) {
|
||||||
|
tmp = fmt.Sprintf("/%s", tmp)
|
||||||
|
}
|
||||||
|
return tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGRPCConfig returns a new Config with all settings applied from opts and
|
||||||
|
// any unset setting using the default gRPC config values.
|
||||||
|
func NewGRPCConfig(opts ...GRPCOption) Config {
|
||||||
|
userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
|
||||||
|
cfg := Config{
|
||||||
|
Traces: SignalConfig{
|
||||||
|
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
|
||||||
|
URLPath: DefaultTracesPath,
|
||||||
|
Compression: NoCompression,
|
||||||
|
Timeout: DefaultTimeout,
|
||||||
|
},
|
||||||
|
RetryConfig: retry.DefaultConfig,
|
||||||
|
DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
|
||||||
|
}
|
||||||
|
cfg = ApplyGRPCEnvConfigs(cfg)
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyGRPCOption(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.ServiceConfig != "" {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||||
|
}
|
||||||
|
// Priroritize GRPCCredentials over Insecure (passing both is an error).
|
||||||
|
if cfg.Traces.GRPCCredentials != nil {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
|
||||||
|
} else if cfg.Traces.Insecure {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||||
|
} else {
|
||||||
|
// Default to using the host's root CA.
|
||||||
|
creds := credentials.NewTLS(nil)
|
||||||
|
cfg.Traces.GRPCCredentials = creds
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
|
||||||
|
}
|
||||||
|
if cfg.Traces.Compression == GzipCompression {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
|
||||||
|
}
|
||||||
|
if cfg.ReconnectionPeriod != 0 {
|
||||||
|
p := grpc.ConnectParams{
|
||||||
|
Backoff: backoff.DefaultConfig,
|
||||||
|
MinConnectTimeout: cfg.ReconnectionPeriod,
|
||||||
|
}
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// GenericOption applies an option to the HTTP or gRPC driver.
|
||||||
|
GenericOption interface {
|
||||||
|
ApplyHTTPOption(Config) Config
|
||||||
|
ApplyGRPCOption(Config) Config
|
||||||
|
|
||||||
|
// A private method to prevent users implementing the
|
||||||
|
// interface and so future additions to it will not
|
||||||
|
// violate compatibility.
|
||||||
|
private()
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPOption applies an option to the HTTP driver.
|
||||||
|
HTTPOption interface {
|
||||||
|
ApplyHTTPOption(Config) Config
|
||||||
|
|
||||||
|
// A private method to prevent users implementing the
|
||||||
|
// interface and so future additions to it will not
|
||||||
|
// violate compatibility.
|
||||||
|
private()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GRPCOption applies an option to the gRPC driver.
|
||||||
|
GRPCOption interface {
|
||||||
|
ApplyGRPCOption(Config) Config
|
||||||
|
|
||||||
|
// A private method to prevent users implementing the
|
||||||
|
// interface and so future additions to it will not
|
||||||
|
// violate compatibility.
|
||||||
|
private()
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// genericOption is an option that applies the same logic
|
||||||
|
// for both gRPC and HTTP.
|
||||||
|
type genericOption struct {
|
||||||
|
fn func(Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
|
||||||
|
return g.fn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
|
||||||
|
return g.fn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (genericOption) private() {}
|
||||||
|
|
||||||
|
func newGenericOption(fn func(cfg Config) Config) GenericOption {
|
||||||
|
return &genericOption{fn: fn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// splitOption is an option that applies different logics
|
||||||
|
// for gRPC and HTTP.
|
||||||
|
type splitOption struct {
|
||||||
|
httpFn func(Config) Config
|
||||||
|
grpcFn func(Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
|
||||||
|
return g.grpcFn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
|
||||||
|
return g.httpFn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (splitOption) private() {}
|
||||||
|
|
||||||
|
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
|
||||||
|
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// httpOption is an option that is only applied to the HTTP driver.
|
||||||
|
type httpOption struct {
|
||||||
|
fn func(Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
|
||||||
|
return h.fn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (httpOption) private() {}
|
||||||
|
|
||||||
|
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
|
||||||
|
return &httpOption{fn: fn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// grpcOption is an option that is only applied to the gRPC driver.
|
||||||
|
type grpcOption struct {
|
||||||
|
fn func(Config) Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
|
||||||
|
return h.fn(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (grpcOption) private() {}
|
||||||
|
|
||||||
|
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
|
||||||
|
return &grpcOption{fn: fn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generic Options
|
||||||
|
|
||||||
|
func WithEndpoint(endpoint string) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.Endpoint = endpoint
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithCompression(compression Compression) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.Compression = compression
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithURLPath(urlPath string) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.URLPath = urlPath
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithRetry(rc retry.Config) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.RetryConfig = rc
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
|
||||||
|
return newSplitOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.TLSCfg = tlsCfg.Clone()
|
||||||
|
return cfg
|
||||||
|
}, func(cfg Config) Config {
|
||||||
|
cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithInsecure() GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.Insecure = true
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithSecure() GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.Insecure = false
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithHeaders(headers map[string]string) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.Headers = headers
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTimeout(duration time.Duration) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Traces.Timeout = duration
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
51
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
generated
vendored
Normal file
51
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
generated
vendored
Normal file
|
@ -0,0 +1,51 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultCollectorGRPCPort is the default gRPC port of the collector.
|
||||||
|
DefaultCollectorGRPCPort uint16 = 4317
|
||||||
|
// DefaultCollectorHTTPPort is the default HTTP port of the collector.
|
||||||
|
DefaultCollectorHTTPPort uint16 = 4318
|
||||||
|
// DefaultCollectorHost is the host address the Exporter will attempt
|
||||||
|
// connect to if no collector address is provided.
|
||||||
|
DefaultCollectorHost string = "localhost"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compression describes the compression used for payloads sent to the
|
||||||
|
// collector.
|
||||||
|
type Compression int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// NoCompression tells the driver to send payloads without
|
||||||
|
// compression.
|
||||||
|
NoCompression Compression = iota
|
||||||
|
// GzipCompression tells the driver to send payloads after
|
||||||
|
// compressing them with gzip.
|
||||||
|
GzipCompression
|
||||||
|
)
|
||||||
|
|
||||||
|
// Marshaler describes the kind of message format sent to the collector.
|
||||||
|
type Marshaler int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// MarshalProto tells the driver to send using the protobuf binary format.
|
||||||
|
MarshalProto Marshaler = iota
|
||||||
|
// MarshalJSON tells the driver to send using json format.
|
||||||
|
MarshalJSON
|
||||||
|
)
|
37
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
generated
vendored
Normal file
37
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CreateTLSConfig creates a tls.Config from a raw certificate bytes
|
||||||
|
// to verify a server certificate.
|
||||||
|
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
|
||||||
|
cp := x509.NewCertPool()
|
||||||
|
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
|
||||||
|
return nil, errors.New("failed to append certificate to the cert pool")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &tls.Config{
|
||||||
|
RootCAs: cp,
|
||||||
|
}, nil
|
||||||
|
}
|
67
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
generated
vendored
Normal file
67
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
generated
vendored
Normal file
|
@ -0,0 +1,67 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/partialsuccess.go
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// PartialSuccess represents the underlying error for all handling
|
||||||
|
// OTLP partial success messages. Use `errors.Is(err,
|
||||||
|
// PartialSuccess{})` to test whether an error passed to the OTel
|
||||||
|
// error handler belongs to this category.
|
||||||
|
type PartialSuccess struct {
|
||||||
|
ErrorMessage string
|
||||||
|
RejectedItems int64
|
||||||
|
RejectedKind string
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ error = PartialSuccess{}
|
||||||
|
|
||||||
|
// Error implements the error interface.
|
||||||
|
func (ps PartialSuccess) Error() string {
|
||||||
|
msg := ps.ErrorMessage
|
||||||
|
if msg == "" {
|
||||||
|
msg = "empty message"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is supports the errors.Is() interface.
|
||||||
|
func (ps PartialSuccess) Is(err error) bool {
|
||||||
|
_, ok := err.(PartialSuccess)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// TracePartialSuccessError returns an error describing a partial success
|
||||||
|
// response for the trace signal.
|
||||||
|
func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
|
||||||
|
return PartialSuccess{
|
||||||
|
ErrorMessage: errorMessage,
|
||||||
|
RejectedItems: itemsRejected,
|
||||||
|
RejectedKind: "spans",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MetricPartialSuccessError returns an error describing a partial success
|
||||||
|
// response for the metric signal.
|
||||||
|
func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
|
||||||
|
return PartialSuccess{
|
||||||
|
ErrorMessage: errorMessage,
|
||||||
|
RejectedItems: itemsRejected,
|
||||||
|
RejectedKind: "metric data points",
|
||||||
|
}
|
||||||
|
}
|
156
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
generated
vendored
Normal file
156
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
generated
vendored
Normal file
|
@ -0,0 +1,156 @@
|
||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/retry/retry.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package retry provides request retry functionality that can perform
|
||||||
|
// configurable exponential backoff for transient errors and honor any
|
||||||
|
// explicit throttle responses received.
|
||||||
|
package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/cenkalti/backoff/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultConfig are the recommended defaults to use.
|
||||||
|
var DefaultConfig = Config{
|
||||||
|
Enabled: true,
|
||||||
|
InitialInterval: 5 * time.Second,
|
||||||
|
MaxInterval: 30 * time.Second,
|
||||||
|
MaxElapsedTime: time.Minute,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config defines configuration for retrying batches in case of export failure
|
||||||
|
// using an exponential backoff.
|
||||||
|
type Config struct {
|
||||||
|
// Enabled indicates whether to not retry sending batches in case of
|
||||||
|
// export failure.
|
||||||
|
Enabled bool
|
||||||
|
// InitialInterval the time to wait after the first failure before
|
||||||
|
// retrying.
|
||||||
|
InitialInterval time.Duration
|
||||||
|
// MaxInterval is the upper bound on backoff interval. Once this value is
|
||||||
|
// reached the delay between consecutive retries will always be
|
||||||
|
// `MaxInterval`.
|
||||||
|
MaxInterval time.Duration
|
||||||
|
// MaxElapsedTime is the maximum amount of time (including retries) spent
|
||||||
|
// trying to send a request/batch. Once this value is reached, the data
|
||||||
|
// is discarded.
|
||||||
|
MaxElapsedTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestFunc wraps a request with retry logic.
|
||||||
|
type RequestFunc func(context.Context, func(context.Context) error) error
|
||||||
|
|
||||||
|
// EvaluateFunc returns if an error is retry-able and if an explicit throttle
|
||||||
|
// duration should be honored that was included in the error.
|
||||||
|
//
|
||||||
|
// The function must return true if the error argument is retry-able,
|
||||||
|
// otherwise it must return false for the first return parameter.
|
||||||
|
//
|
||||||
|
// The function must return a non-zero time.Duration if the error contains
|
||||||
|
// explicit throttle duration that should be honored, otherwise it must return
|
||||||
|
// a zero valued time.Duration.
|
||||||
|
type EvaluateFunc func(error) (bool, time.Duration)
|
||||||
|
|
||||||
|
// RequestFunc returns a RequestFunc using the evaluate function to determine
// if requests can be retried and based on the exponential backoff
// configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
	if !c.Enabled {
		// Retry is disabled: invoke the request exactly once, with no
		// retry wrapping at all.
		return func(ctx context.Context, fn func(context.Context) error) error {
			return fn(ctx)
		}
	}

	return func(ctx context.Context, fn func(context.Context) error) error {
		// Do not use NewExponentialBackOff since it calls Reset and the code here
		// must call Reset after changing the InitialInterval (this saves an
		// unnecessary call to Now).
		b := &backoff.ExponentialBackOff{
			InitialInterval:     c.InitialInterval,
			RandomizationFactor: backoff.DefaultRandomizationFactor,
			Multiplier:          backoff.DefaultMultiplier,
			MaxInterval:         c.MaxInterval,
			MaxElapsedTime:      c.MaxElapsedTime,
			Stop:                backoff.Stop,
			Clock:               backoff.SystemClock,
		}
		b.Reset()

		for {
			err := fn(ctx)
			if err == nil {
				// Success; no retry needed.
				return nil
			}

			retryable, throttle := evaluate(err)
			if !retryable {
				// Permanent failure: surface the error unchanged.
				return err
			}

			bOff := b.NextBackOff()
			if bOff == backoff.Stop {
				// The retry budget (MaxElapsedTime) is exhausted.
				return fmt.Errorf("max retry time elapsed: %w", err)
			}

			// Wait for the greater of the backoff or throttle delay.
			var delay time.Duration
			if bOff > throttle {
				delay = bOff
			} else {
				// Honoring the server's throttle hint could overrun the
				// retry budget; check before sleeping so we fail fast
				// instead of waiting pointlessly.
				elapsed := b.GetElapsedTime()
				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
					return fmt.Errorf("max retry time would elapse: %w", err)
				}
				delay = throttle
			}

			// waitFunc returns non-nil only when ctx ended during the wait;
			// report both the context error and the original request error.
			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
				return fmt.Errorf("%w: %s", ctxErr, err)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// waitFunc is the wait implementation used by RequestFunc.
// Allow override for testing.
var waitFunc = wait
|
||||||
|
|
||||||
|
// wait takes the caller's context, and the amount of time to wait. It will
|
||||||
|
// return nil if the timer fires before or at the same time as the context's
|
||||||
|
// deadline. This indicates that the call can be retried.
|
||||||
|
func wait(ctx context.Context, delay time.Duration) error {
|
||||||
|
timer := time.NewTimer(delay)
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// Handle the case where the timer and context deadline end
|
||||||
|
// simultaneously by prioritizing the timer expiration nil value
|
||||||
|
// response.
|
||||||
|
select {
|
||||||
|
case <-timer.C:
|
||||||
|
default:
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
case <-timer.C:
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
183
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
generated
vendored
Normal file
183
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
generated
vendored
Normal file
|
@ -0,0 +1,183 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Option applies an option to the gRPC driver.
type Option interface {
	// applyGRPCOption returns a copy of cfg with the option applied.
	applyGRPCOption(otlpconfig.Config) otlpconfig.Config
}
|
||||||
|
|
||||||
|
func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
|
||||||
|
converted := make([]otlpconfig.GRPCOption, len(opts))
|
||||||
|
for i, o := range opts {
|
||||||
|
converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryConfig defines configuration for retrying export of span batches that
// failed to be received by the target endpoint.
//
// This configuration does not define any network retry strategy. That is
// entirely handled by the gRPC ClientConn.
type RetryConfig retry.Config
|
||||||
|
|
||||||
|
// wrappedOption adapts an otlpconfig.GRPCOption to the public Option
// interface by delegating applyGRPCOption to the embedded value.
type wrappedOption struct {
	otlpconfig.GRPCOption
}

func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
	return w.ApplyGRPCOption(cfg)
}
|
||||||
|
|
||||||
|
// WithInsecure disables client transport security for the exporter's gRPC
// connection just like grpc.WithInsecure()
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
// default, client security is required unless WithInsecure is used.
//
// This option has no effect if WithGRPCConn is used.
func WithInsecure() Option {
	return wrappedOption{otlpconfig.WithInsecure()}
}
|
||||||
|
|
||||||
|
// WithEndpoint sets the target endpoint the exporter will connect to. If
// unset, localhost:4317 will be used as a default.
//
// This option has no effect if WithGRPCConn is used.
func WithEndpoint(endpoint string) Option {
	return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
|
||||||
|
|
||||||
|
// WithReconnectionPeriod sets the minimum amount of time between connection
// attempts to the target endpoint.
//
// This option has no effect if WithGRPCConn is used.
func WithReconnectionPeriod(rp time.Duration) Option {
	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
		cfg.ReconnectionPeriod = rp
		return cfg
	})}
}
|
||||||
|
|
||||||
|
func compressorToCompression(compressor string) otlpconfig.Compression {
|
||||||
|
if compressor == "gzip" {
|
||||||
|
return otlpconfig.GzipCompression
|
||||||
|
}
|
||||||
|
|
||||||
|
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
|
||||||
|
return otlpconfig.NoCompression
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCompressor sets the compressor for the gRPC client to use when sending
// requests. Supported compressor values: "gzip".
func WithCompressor(compressor string) Option {
	return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
}
|
||||||
|
|
||||||
|
// WithHeaders will send the provided headers with each gRPC requests.
func WithHeaders(headers map[string]string) Option {
	return wrappedOption{otlpconfig.WithHeaders(headers)}
}
|
||||||
|
|
||||||
|
// WithTLSCredentials allows the connection to use TLS credentials when
// talking to the server. It takes in grpc.TransportCredentials instead of say
// a Certificate file or a tls.Certificate, because the retrieving of these
// credentials can be done in many ways e.g. plain file, in code tls.Config or
// by certificate rotation, so it is up to the caller to decide what to use.
//
// This option has no effect if WithGRPCConn is used.
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
		cfg.Traces.GRPCCredentials = creds
		return cfg
	})}
}
|
||||||
|
|
||||||
|
// WithServiceConfig defines the default gRPC service config used.
//
// This option has no effect if WithGRPCConn is used.
func WithServiceConfig(serviceConfig string) Option {
	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
		cfg.ServiceConfig = serviceConfig
		return cfg
	})}
}
|
||||||
|
|
||||||
|
// WithDialOption sets explicit grpc.DialOptions to use when making a
// connection. The options here are appended to the internal grpc.DialOptions
// used so they will take precedence over any other internal grpc.DialOptions
// they might conflict with.
//
// This option has no effect if WithGRPCConn is used.
func WithDialOption(opts ...grpc.DialOption) Option {
	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
		cfg.DialOptions = opts
		return cfg
	})}
}
|
||||||
|
|
||||||
|
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
//
// This option takes precedence over any other option that relates to
// establishing or persisting a gRPC connection to a target endpoint. Any
// other option of those types passed will be ignored.
//
// It is the callers responsibility to close the passed conn. The client
// Shutdown method will not close this connection.
func WithGRPCConn(conn *grpc.ClientConn) Option {
	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
		cfg.GRPCConn = conn
		return cfg
	})}
}
|
||||||
|
|
||||||
|
// WithTimeout sets the max amount of time a client will attempt to export a
// batch of spans. This takes precedence over any retry settings defined with
// WithRetry, once this time limit has been reached the export is abandoned
// and the batch of spans is dropped.
//
// If unset, the default timeout will be set to 10 seconds.
func WithTimeout(duration time.Duration) Option {
	return wrappedOption{otlpconfig.WithTimeout(duration)}
}
|
||||||
|
|
||||||
|
// WithRetry sets the retry policy for transient retryable errors that may be
// returned by the target endpoint when exporting a batch of spans.
//
// If the target endpoint responds with not only a retryable error, but
// explicitly returns a backoff time in the response. That time will take
// precedence over these settings.
//
// These settings do not define any network retry strategy. That is entirely
// handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
// after each error for no more than a total time of 1 minute.
func WithRetry(settings RetryConfig) Option {
	return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
}
|
20
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
generated
vendored
Normal file
20
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
|
|
||||||
|
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
	const version = "1.21.0"
	return version
}
|
|
@ -0,0 +1,264 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package noop provides an implementation of the OpenTelemetry metric API that
|
||||||
|
// produces no telemetry and minimizes used computation resources.
|
||||||
|
//
|
||||||
|
// Using this package to implement the OpenTelemetry metric API will
|
||||||
|
// effectively disable OpenTelemetry.
|
||||||
|
//
|
||||||
|
// This implementation can be embedded in other implementations of the
|
||||||
|
// OpenTelemetry metric API. Doing so will mean the implementation defaults to
|
||||||
|
// no operation for methods it does not implement.
|
||||||
|
package noop // import "go.opentelemetry.io/otel/metric/noop"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/metric"
|
||||||
|
"go.opentelemetry.io/otel/metric/embedded"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// Compile-time checks that every no-op type in this package implements
	// its corresponding OpenTelemetry metric API interface.

	_ metric.MeterProvider                  = MeterProvider{}
	_ metric.Meter                          = Meter{}
	_ metric.Observer                       = Observer{}
	_ metric.Registration                   = Registration{}
	_ metric.Int64Counter                   = Int64Counter{}
	_ metric.Float64Counter                 = Float64Counter{}
	_ metric.Int64UpDownCounter             = Int64UpDownCounter{}
	_ metric.Float64UpDownCounter           = Float64UpDownCounter{}
	_ metric.Int64Histogram                 = Int64Histogram{}
	_ metric.Float64Histogram               = Float64Histogram{}
	_ metric.Int64ObservableCounter         = Int64ObservableCounter{}
	_ metric.Float64ObservableCounter       = Float64ObservableCounter{}
	_ metric.Int64ObservableGauge           = Int64ObservableGauge{}
	_ metric.Float64ObservableGauge         = Float64ObservableGauge{}
	_ metric.Int64ObservableUpDownCounter   = Int64ObservableUpDownCounter{}
	_ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
	_ metric.Int64Observer                  = Int64Observer{}
	_ metric.Float64Observer                = Float64Observer{}
)
|
||||||
|
|
||||||
|
// MeterProvider is an OpenTelemetry No-Op MeterProvider.
type MeterProvider struct{ embedded.MeterProvider }

// NewMeterProvider returns a MeterProvider that does not record any telemetry.
func NewMeterProvider() MeterProvider {
	return MeterProvider{}
}

// Meter returns an OpenTelemetry Meter that does not record any telemetry.
// The name and options are ignored.
func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
	return Meter{}
}
|
||||||
|
|
||||||
|
// Meter is an OpenTelemetry No-Op Meter. Every instrument it creates is a
// no-op, and every creation method always returns a nil error.
type Meter struct{ embedded.Meter }

// Int64Counter returns a Counter used to record int64 measurements that
// produces no telemetry.
func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
	return Int64Counter{}, nil
}

// Int64UpDownCounter returns an UpDownCounter used to record int64
// measurements that produces no telemetry.
func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
	return Int64UpDownCounter{}, nil
}

// Int64Histogram returns a Histogram used to record int64 measurements that
// produces no telemetry.
func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
	return Int64Histogram{}, nil
}

// Int64ObservableCounter returns an ObservableCounter used to record int64
// measurements that produces no telemetry.
func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
	return Int64ObservableCounter{}, nil
}

// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
// record int64 measurements that produces no telemetry.
func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
	return Int64ObservableUpDownCounter{}, nil
}

// Int64ObservableGauge returns an ObservableGauge used to record int64
// measurements that produces no telemetry.
func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
	return Int64ObservableGauge{}, nil
}

// Float64Counter returns a Counter used to record float64 measurements that
// produces no telemetry.
func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
	return Float64Counter{}, nil
}

// Float64UpDownCounter returns an UpDownCounter used to record float64
// measurements that produces no telemetry.
func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
	return Float64UpDownCounter{}, nil
}

// Float64Histogram returns a Histogram used to record float64 measurements
// that produces no telemetry.
func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
	return Float64Histogram{}, nil
}

// Float64ObservableCounter returns an ObservableCounter used to record
// float64 measurements that produces no telemetry.
func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
	return Float64ObservableCounter{}, nil
}

// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
// record float64 measurements that produces no telemetry.
func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
	return Float64ObservableUpDownCounter{}, nil
}

// Float64ObservableGauge returns an ObservableGauge used to record float64
// measurements that produces no telemetry.
func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
	return Float64ObservableGauge{}, nil
}

// RegisterCallback performs no operation. The returned Registration's
// Unregister always succeeds.
func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
	return Registration{}, nil
}
|
||||||
|
|
||||||
|
// Observer acts as a recorder of measurements for multiple instruments in a
// Callback, performing no operation.
type Observer struct{ embedded.Observer }

// ObserveFloat64 performs no operation; the observation is discarded.
func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
}

// ObserveInt64 performs no operation; the observation is discarded.
func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
}
|
||||||
|
|
||||||
|
// Registration is the registration of a Callback with a No-Op Meter.
type Registration struct{ embedded.Registration }

// Unregister unregisters the Callback the Registration represents with the
// No-Op Meter. This will always return nil because the No-Op Meter performs
// no operation, including holding any record of registrations.
func (Registration) Unregister() error { return nil }
|
||||||
|
|
||||||
|
// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
// It produces no telemetry.
type Int64Counter struct{ embedded.Int64Counter }

// Add performs no operation; the value and options are discarded.
func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}

// Float64Counter is an OpenTelemetry Counter used to record float64
// measurements. It produces no telemetry.
type Float64Counter struct{ embedded.Float64Counter }

// Add performs no operation; the value and options are discarded.
func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}

// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
// measurements. It produces no telemetry.
type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }

// Add performs no operation; the value and options are discarded.
func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}

// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
// float64 measurements. It produces no telemetry.
type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }

// Add performs no operation; the value and options are discarded.
func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}

// Int64Histogram is an OpenTelemetry Histogram used to record int64
// measurements. It produces no telemetry.
type Int64Histogram struct{ embedded.Int64Histogram }

// Record performs no operation; the value and options are discarded.
func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}

// Float64Histogram is an OpenTelemetry Histogram used to record float64
// measurements. It produces no telemetry.
type Float64Histogram struct{ embedded.Float64Histogram }

// Record performs no operation; the value and options are discarded.
func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
|
||||||
|
|
||||||
|
// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
// int64 measurements. It produces no telemetry.
type Int64ObservableCounter struct {
	metric.Int64Observable
	embedded.Int64ObservableCounter
}

// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
// float64 measurements. It produces no telemetry.
type Float64ObservableCounter struct {
	metric.Float64Observable
	embedded.Float64ObservableCounter
}

// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
// int64 measurements. It produces no telemetry.
type Int64ObservableGauge struct {
	metric.Int64Observable
	embedded.Int64ObservableGauge
}

// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
// float64 measurements. It produces no telemetry.
type Float64ObservableGauge struct {
	metric.Float64Observable
	embedded.Float64ObservableGauge
}

// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
// used to record int64 measurements. It produces no telemetry.
type Int64ObservableUpDownCounter struct {
	metric.Int64Observable
	embedded.Int64ObservableUpDownCounter
}

// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
// used to record float64 measurements. It produces no telemetry.
type Float64ObservableUpDownCounter struct {
	metric.Float64Observable
	embedded.Float64ObservableUpDownCounter
}
|
||||||
|
|
||||||
|
// Int64Observer is a recorder of int64 measurements that performs no operation.
type Int64Observer struct{ embedded.Int64Observer }

// Observe performs no operation; the value and options are discarded.
func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}

// Float64Observer is a recorder of float64 measurements that performs no
// operation.
type Float64Observer struct{ embedded.Float64Observer }

// Observe performs no operation; the value and options are discarded.
func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
|
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,24 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package instrumentation provides types to represent the code libraries that
|
||||||
|
// provide OpenTelemetry instrumentation. These types are used in the
|
||||||
|
// OpenTelemetry signal pipelines to identify the source of telemetry.
|
||||||
|
//
|
||||||
|
// See
|
||||||
|
// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
|
||||||
|
// and
|
||||||
|
// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
|
||||||
|
// for more information.
|
||||||
|
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
|
|
@ -0,0 +1,19 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
|
|
||||||
|
// Library represents the instrumentation library.
|
||||||
|
// Deprecated: please use Scope instead.
|
||||||
|
type Library = Scope
|
|
@ -0,0 +1,26 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
|
|
||||||
|
// Scope represents the instrumentation scope.
|
||||||
|
type Scope struct {
|
||||||
|
// Name is the name of the instrumentation scope. This should be the
|
||||||
|
// Go package name of that scope.
|
||||||
|
Name string
|
||||||
|
// Version is the version of the instrumentation scope.
|
||||||
|
Version string
|
||||||
|
// SchemaURL of the telemetry emitted by the scope.
|
||||||
|
SchemaURL string
|
||||||
|
}
|
|
@ -0,0 +1,177 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package env // import "go.opentelemetry.io/otel/sdk/internal/env"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Environment variable names.
|
||||||
|
const (
|
||||||
|
// BatchSpanProcessorScheduleDelayKey is the delay interval between two
|
||||||
|
// consecutive exports (i.e. 5000).
|
||||||
|
BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY"
|
||||||
|
// BatchSpanProcessorExportTimeoutKey is the maximum allowed time to
|
||||||
|
// export data (i.e. 3000).
|
||||||
|
BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT"
|
||||||
|
// BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048).
|
||||||
|
BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE"
|
||||||
|
// BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e.
|
||||||
|
// 512). Note: it must be less than or equal to
|
||||||
|
// EnvBatchSpanProcessorMaxQueueSize.
|
||||||
|
BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
|
||||||
|
|
||||||
|
// AttributeValueLengthKey is the maximum allowed attribute value size.
|
||||||
|
AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
|
||||||
|
|
||||||
|
// AttributeCountKey is the maximum allowed span attribute count.
|
||||||
|
AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT"
|
||||||
|
|
||||||
|
// SpanAttributeValueLengthKey is the maximum allowed attribute value size
|
||||||
|
// for a span.
|
||||||
|
SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
|
||||||
|
|
||||||
|
// SpanAttributeCountKey is the maximum allowed span attribute count for a
|
||||||
|
// span.
|
||||||
|
SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
|
||||||
|
|
||||||
|
// SpanEventCountKey is the maximum allowed span event count.
|
||||||
|
SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT"
|
||||||
|
|
||||||
|
// SpanEventAttributeCountKey is the maximum allowed attribute per span
|
||||||
|
// event count.
|
||||||
|
SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
|
||||||
|
|
||||||
|
// SpanLinkCountKey is the maximum allowed span link count.
|
||||||
|
SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT"
|
||||||
|
|
||||||
|
// SpanLinkAttributeCountKey is the maximum allowed attribute per span
|
||||||
|
// link count.
|
||||||
|
SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
|
||||||
|
)
|
||||||
|
|
||||||
|
// firstInt returns the value of the first matching environment variable from
|
||||||
|
// keys. If the value is not an integer or no match is found, defaultValue is
|
||||||
|
// returned.
|
||||||
|
func firstInt(defaultValue int, keys ...string) int {
|
||||||
|
for _, key := range keys {
|
||||||
|
value := os.Getenv(key)
|
||||||
|
if value == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
intValue, err := strconv.Atoi(value)
|
||||||
|
if err != nil {
|
||||||
|
global.Info("Got invalid value, number value expected.", key, value)
|
||||||
|
return defaultValue
|
||||||
|
}
|
||||||
|
|
||||||
|
return intValue
|
||||||
|
}
|
||||||
|
|
||||||
|
return defaultValue
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntEnvOr returns the int value of the environment variable with name key if
|
||||||
|
// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned.
|
||||||
|
func IntEnvOr(key string, defaultValue int) int {
|
||||||
|
value := os.Getenv(key)
|
||||||
|
if value == "" {
|
||||||
|
return defaultValue
|
||||||
|
}
|
||||||
|
|
||||||
|
intValue, err := strconv.Atoi(value)
|
||||||
|
if err != nil {
|
||||||
|
global.Info("Got invalid value, number value expected.", key, value)
|
||||||
|
return defaultValue
|
||||||
|
}
|
||||||
|
|
||||||
|
return intValue
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchSpanProcessorScheduleDelay returns the environment variable value for
|
||||||
|
// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
|
||||||
|
// returned.
|
||||||
|
func BatchSpanProcessorScheduleDelay(defaultValue int) int {
|
||||||
|
return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchSpanProcessorExportTimeout returns the environment variable value for
|
||||||
|
// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
|
||||||
|
// returned.
|
||||||
|
func BatchSpanProcessorExportTimeout(defaultValue int) int {
|
||||||
|
return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchSpanProcessorMaxQueueSize returns the environment variable value for
|
||||||
|
// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
|
||||||
|
// returned.
|
||||||
|
func BatchSpanProcessorMaxQueueSize(defaultValue int) int {
|
||||||
|
return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for
|
||||||
|
// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
|
||||||
|
// is returned.
|
||||||
|
func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int {
|
||||||
|
return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanAttributeValueLength returns the environment variable value for the
|
||||||
|
// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
|
||||||
|
// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is
|
||||||
|
// returned or defaultValue if that is not set.
|
||||||
|
func SpanAttributeValueLength(defaultValue int) int {
|
||||||
|
return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanAttributeCount returns the environment variable value for the
|
||||||
|
// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
|
||||||
|
// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or
|
||||||
|
// defaultValue if that is not set.
|
||||||
|
func SpanAttributeCount(defaultValue int) int {
|
||||||
|
return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanEventCount returns the environment variable value for the
|
||||||
|
// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is
|
||||||
|
// returned.
|
||||||
|
func SpanEventCount(defaultValue int) int {
|
||||||
|
return IntEnvOr(SpanEventCountKey, defaultValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanEventAttributeCount returns the environment variable value for the
|
||||||
|
// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue
|
||||||
|
// is returned.
|
||||||
|
func SpanEventAttributeCount(defaultValue int) int {
|
||||||
|
return IntEnvOr(SpanEventAttributeCountKey, defaultValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanLinkCount returns the environment variable value for the
|
||||||
|
// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is
|
||||||
|
// returned.
|
||||||
|
func SpanLinkCount(defaultValue int) int {
|
||||||
|
return IntEnvOr(SpanLinkCountKey, defaultValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanLinkAttributeCount returns the environment variable value for the
|
||||||
|
// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is
|
||||||
|
// returned.
|
||||||
|
func SpanLinkAttributeCount(defaultValue int) int {
|
||||||
|
return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
|
||||||
|
}
|
|
@ -0,0 +1,29 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal // import "go.opentelemetry.io/otel/sdk/internal"
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
|
||||||
|
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
|
||||||
|
//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue