mirror of https://github.com/docker/cli.git
Add a common interface between different Kubernetes Stack API versions and use it in kubernetes stack commands
* Add kubernetes Stack API v1beta2 client
* Upgrade v1beta1 client to remove generated code

Signed-off-by: Silvin Lubecki <silvin.lubecki@docker.com>
parent 99bd7ed693
commit f958c66a6d
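The change introduces one internal `stackClient` interface that every stack command talks to, plus one concrete client per served API version, selected at runtime from the server's advertised API groups. A minimal sketch of that dispatch pattern, with the types deliberately simplified for illustration (the real interface and the `newStackV1Beta1`/`newStackV1Beta2` constructors appear in the diff below):

```go
package main

import "fmt"

// stackClient is a simplified stand-in for the interface introduced by this
// commit: commands depend on it, never on a concrete API version.
type stackClient interface {
	CreateOrUpdate(name string) error
	Delete(name string) error
}

// One implementation per served API version.
type v1beta1Client struct{}

func (v1beta1Client) CreateOrUpdate(name string) error { fmt.Println("v1beta1 upsert", name); return nil }
func (v1beta1Client) Delete(name string) error         { fmt.Println("v1beta1 delete", name); return nil }

type v1beta2Client struct{}

func (v1beta2Client) CreateOrUpdate(name string) error { fmt.Println("v1beta2 upsert", name); return nil }
func (v1beta2Client) Delete(name string) error         { fmt.Println("v1beta2 delete", name); return nil }

// newStackClient mirrors KubeCli.stacks() below: probe the server once for
// the best available Stack API version, then hand back the matching client.
func newStackClient(version string) (stackClient, error) {
	switch version {
	case "v1beta1":
		return v1beta1Client{}, nil
	case "v1beta2":
		return v1beta2Client{}, nil
	default:
		return nil, fmt.Errorf("no supported Stack API version")
	}
}

func main() {
	c, err := newStackClient("v1beta2")
	if err != nil {
		panic(err)
	}
	c.CreateOrUpdate("mystack") // prints: v1beta2 upsert mystack
}
```

The payoff shows up throughout the diff: deploy, list, ps, remove, and services stop importing the versioned clientset directly.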
@@ -6,7 +6,6 @@ import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/kubernetes"
-	composev1beta1 "github.com/docker/cli/kubernetes/client/clientset_generated/clientset/typed/compose/v1beta1"
 	"github.com/docker/docker/pkg/homedir"
 	"github.com/pkg/errors"
 	flag "github.com/spf13/pflag"
@@ -77,20 +76,17 @@ func (c *KubeCli) composeClient() (*Factory, error) {
 	return NewFactory(c.kubeNamespace, c.kubeConfig)
 }
 
-func (c *KubeCli) stacks() (composev1beta1.StackInterface, error) {
+func (c *KubeCli) stacks() (stackClient, error) {
 	version, err := kubernetes.GetStackAPIVersion(c.clientSet)
 	if err != nil {
 		return nil, err
 	}
 
 	switch version {
 	case kubernetes.StackAPIV1Beta1:
-		clientSet, err := composev1beta1.NewForConfig(c.kubeConfig)
-		if err != nil {
-			return nil, err
-		}
-		return clientSet.Stacks(c.kubeNamespace), nil
+		return newStackV1Beta1(c.kubeConfig, c.kubeNamespace)
+	case kubernetes.StackAPIV1Beta2:
+		return newStackV1Beta2(c.kubeConfig, c.kubeNamespace)
	default:
		return nil, errors.Errorf("no supported Stack API version")
	}
@@ -1,54 +0,0 @@
-package kubernetes
-
-import (
-	"fmt"
-	"sort"
-
-	composetypes "github.com/docker/cli/cli/compose/types"
-	apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
-	"github.com/docker/cli/kubernetes/labels"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-)
-
-// IsColliding verifies that services defined in the stack collide with already deployed services
-func IsColliding(services corev1.ServiceInterface, stack *apiv1beta1.Stack, cfg *composetypes.Config) error {
-	stackObjects := getServices(cfg)
-
-	for _, srv := range stackObjects {
-		if err := verify(services, stack.Name, srv); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// verify checks whether the service is already present in kubernetes.
-// If we find the service by name but it doesn't have our label or it has a different value
-// than the stack name for the label, we fail (i.e. it will collide)
-func verify(services corev1.ServiceInterface, stackName string, service string) error {
-	svc, err := services.Get(service, metav1.GetOptions{})
-	if err == nil {
-		if key, ok := svc.ObjectMeta.Labels[labels.ForStackName]; ok {
-			if key != stackName {
-				return fmt.Errorf("service %s already present in stack named %s", service, key)
-			}
-			return nil
-		}
-		return fmt.Errorf("service %s already present in the cluster", service)
-	}
-	return nil
-}
-
-func getServices(cfg *composetypes.Config) []string {
-	services := make([]string, len(cfg.Services))
-	for i := range cfg.Services {
-		services[i] = cfg.Services[i].Name
-	}
-	sort.Strings(services)
-	return services
-}
@@ -1,41 +1,327 @@
 package kubernetes
 
 import (
-	"github.com/docker/cli/kubernetes/labels"
-	apiv1 "k8s.io/api/core/v1"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/docker/cli/cli/compose/loader"
+	composeTypes "github.com/docker/cli/cli/compose/types"
+	composetypes "github.com/docker/cli/cli/compose/types"
+	"github.com/docker/cli/kubernetes/compose/v1beta1"
+	"github.com/docker/cli/kubernetes/compose/v1beta2"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// toConfigMap converts a Compose Config to a Kube ConfigMap.
-func toConfigMap(stackName, name, key string, content []byte) *apiv1.ConfigMap {
-	return &apiv1.ConfigMap{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "ConfigMap",
-			APIVersion: "v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-			Labels: map[string]string{
-				labels.ForStackName: stackName,
-			},
-		},
-		Data: map[string]string{
-			key: string(content),
-		},
-	}
-}
-
-// toSecret converts a Compose Secret to a Kube Secret.
-func toSecret(stackName, name, key string, content []byte) *apiv1.Secret {
-	return &apiv1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-			Labels: map[string]string{
-				labels.ForStackName: stackName,
-			},
-		},
-		Data: map[string][]byte{
-			key: content,
-		},
-	}
-}
+func loadStackData(composefile string) (*composetypes.Config, error) {
+	parsed, err := loader.ParseYAML([]byte(composefile))
+	if err != nil {
+		return nil, err
+	}
+	return loader.Load(composetypes.ConfigDetails{
+		ConfigFiles: []composetypes.ConfigFile{
+			{
+				Config: parsed,
+			},
+		},
+	})
+}
+
+// Conversions from internal stack to different stack compose component versions.
+func stackFromV1beta1(in *v1beta1.Stack) (stack, error) {
+	cfg, err := loadStackData(in.Spec.ComposeFile)
+	if err != nil {
+		return stack{}, err
+	}
+	return stack{
+		name:        in.ObjectMeta.Name,
+		composeFile: in.Spec.ComposeFile,
+		spec:        fromComposeConfig(cfg),
+	}, nil
+}
+
+func stackToV1beta1(s stack) *v1beta1.Stack {
+	return &v1beta1.Stack{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: s.name,
+		},
+		Spec: v1beta1.StackSpec{
+			ComposeFile: s.composeFile,
+		},
+	}
+}
+
+func stackFromV1beta2(in *v1beta2.Stack) stack {
+	return stack{
+		name: in.ObjectMeta.Name,
+		spec: in.Spec,
+	}
+}
+
+func stackToV1beta2(s stack) *v1beta2.Stack {
+	return &v1beta2.Stack{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: s.name,
+		},
+		Spec: s.spec,
+	}
+}
+
+func fromComposeConfig(c *composeTypes.Config) *v1beta2.StackSpec {
+	if c == nil {
+		return nil
+	}
+	serviceConfigs := make([]v1beta2.ServiceConfig, len(c.Services))
+	for i, s := range c.Services {
+		serviceConfigs[i] = fromComposeServiceConfig(s)
+	}
+	return &v1beta2.StackSpec{
+		Services: serviceConfigs,
+		Secrets:  fromComposeSecrets(c.Secrets),
+		Configs:  fromComposeConfigs(c.Configs),
+	}
+}
+
+func fromComposeSecrets(s map[string]composeTypes.SecretConfig) map[string]v1beta2.SecretConfig {
+	if s == nil {
+		return nil
+	}
+	m := map[string]v1beta2.SecretConfig{}
+	for key, value := range s {
+		m[key] = v1beta2.SecretConfig{
+			Name: value.Name,
+			File: value.File,
+			External: v1beta2.External{
+				Name:     value.External.Name,
+				External: value.External.External,
+			},
+			Labels: value.Labels,
+		}
+	}
+	return m
+}
+
+func fromComposeConfigs(s map[string]composeTypes.ConfigObjConfig) map[string]v1beta2.ConfigObjConfig {
+	if s == nil {
+		return nil
+	}
+	m := map[string]v1beta2.ConfigObjConfig{}
+	for key, value := range s {
+		m[key] = v1beta2.ConfigObjConfig{
+			Name: value.Name,
+			File: value.File,
+			External: v1beta2.External{
+				Name:     value.External.Name,
+				External: value.External.External,
+			},
+			Labels: value.Labels,
+		}
+	}
+	return m
+}
+
+func fromComposeServiceConfig(s composeTypes.ServiceConfig) v1beta2.ServiceConfig {
+	var userID *int64
+	if s.User != "" {
+		numerical, err := strconv.Atoi(s.User)
+		if err == nil {
+			unixUserID := int64(numerical)
+			userID = &unixUserID
+		}
+	}
+	return v1beta2.ServiceConfig{
+		Name:    s.Name,
+		CapAdd:  s.CapAdd,
+		CapDrop: s.CapDrop,
+		Command: s.Command,
+		Configs: fromComposeServiceConfigs(s.Configs),
+		Deploy: v1beta2.DeployConfig{
+			Mode:          s.Deploy.Mode,
+			Replicas:      s.Deploy.Replicas,
+			Labels:        s.Deploy.Labels,
+			UpdateConfig:  fromComposeUpdateConfig(s.Deploy.UpdateConfig),
+			Resources:     fromComposeResources(s.Deploy.Resources),
+			RestartPolicy: fromComposeRestartPolicy(s.Deploy.RestartPolicy),
+			Placement:     fromComposePlacement(s.Deploy.Placement),
+		},
+		Entrypoint:      s.Entrypoint,
+		Environment:     s.Environment,
+		ExtraHosts:      s.ExtraHosts,
+		Hostname:        s.Hostname,
+		HealthCheck:     fromComposeHealthcheck(s.HealthCheck),
+		Image:           s.Image,
+		Ipc:             s.Ipc,
+		Labels:          s.Labels,
+		Pid:             s.Pid,
+		Ports:           fromComposePorts(s.Ports),
+		Privileged:      s.Privileged,
+		ReadOnly:        s.ReadOnly,
+		Secrets:         fromComposeServiceSecrets(s.Secrets),
+		StdinOpen:       s.StdinOpen,
+		StopGracePeriod: s.StopGracePeriod,
+		Tmpfs:           s.Tmpfs,
+		Tty:             s.Tty,
+		User:            userID,
+		Volumes:         fromComposeServiceVolumeConfig(s.Volumes),
+		WorkingDir:      s.WorkingDir,
+	}
+}
+
+func fromComposePorts(ports []composeTypes.ServicePortConfig) []v1beta2.ServicePortConfig {
+	if ports == nil {
+		return nil
+	}
+	p := make([]v1beta2.ServicePortConfig, len(ports))
+	for i, port := range ports {
+		p[i] = v1beta2.ServicePortConfig{
+			Mode:      port.Mode,
+			Target:    port.Target,
+			Published: port.Published,
+			Protocol:  port.Protocol,
+		}
+	}
+	return p
+}
+
+func fromComposeServiceSecrets(secrets []composeTypes.ServiceSecretConfig) []v1beta2.ServiceSecretConfig {
+	if secrets == nil {
+		return nil
+	}
+	c := make([]v1beta2.ServiceSecretConfig, len(secrets))
+	for i, secret := range secrets {
+		c[i] = v1beta2.ServiceSecretConfig{
+			Source: secret.Source,
+			Target: secret.Target,
+			UID:    secret.UID,
+			Mode:   secret.Mode,
+		}
+	}
+	return c
+}
+
+func fromComposeServiceConfigs(configs []composeTypes.ServiceConfigObjConfig) []v1beta2.ServiceConfigObjConfig {
+	if configs == nil {
+		return nil
+	}
+	c := make([]v1beta2.ServiceConfigObjConfig, len(configs))
+	for i, config := range configs {
+		c[i] = v1beta2.ServiceConfigObjConfig{
+			Source: config.Source,
+			Target: config.Target,
+			UID:    config.UID,
+			Mode:   config.Mode,
+		}
+	}
+	return c
+}
+
+func fromComposeHealthcheck(h *composeTypes.HealthCheckConfig) *v1beta2.HealthCheckConfig {
+	if h == nil {
+		return nil
+	}
+	return &v1beta2.HealthCheckConfig{
+		Test:     h.Test,
+		Timeout:  h.Timeout,
+		Interval: h.Interval,
+		Retries:  h.Retries,
+	}
+}
+
+func fromComposePlacement(p composeTypes.Placement) v1beta2.Placement {
+	return v1beta2.Placement{
+		Constraints: fromComposeConstraints(p.Constraints),
+	}
+}
+
+var constraintEquals = regexp.MustCompile(`([\w\.]*)\W*(==|!=)\W*([\w\.]*)`)
+
+const (
+	swarmOs          = "node.platform.os"
+	swarmArch        = "node.platform.arch"
+	swarmHostname    = "node.hostname"
+	swarmLabelPrefix = "node.labels."
+)
+
+func fromComposeConstraints(s []string) *v1beta2.Constraints {
+	if len(s) == 0 {
+		return nil
+	}
+	constraints := &v1beta2.Constraints{}
+	for _, constraint := range s {
+		matches := constraintEquals.FindStringSubmatch(constraint)
+		if len(matches) == 4 {
+			key := matches[1]
+			operator := matches[2]
+			value := matches[3]
+			constraint := &v1beta2.Constraint{
+				Operator: operator,
+				Value:    value,
+			}
+			switch {
+			case key == swarmOs:
+				constraints.OperatingSystem = constraint
+			case key == swarmArch:
+				constraints.Architecture = constraint
+			case key == swarmHostname:
+				constraints.Hostname = constraint
+			case strings.HasPrefix(key, swarmLabelPrefix):
+				if constraints.MatchLabels == nil {
+					constraints.MatchLabels = map[string]v1beta2.Constraint{}
+				}
+				constraints.MatchLabels[strings.TrimPrefix(key, swarmLabelPrefix)] = *constraint
+			}
+		}
+	}
+	return constraints
+}
+
+func fromComposeResources(r composeTypes.Resources) v1beta2.Resources {
+	return v1beta2.Resources{
+		Limits:       fromComposeResourcesResource(r.Limits),
+		Reservations: fromComposeResourcesResource(r.Reservations),
+	}
+}
+
+func fromComposeResourcesResource(r *composeTypes.Resource) *v1beta2.Resource {
+	if r == nil {
+		return nil
+	}
+	return &v1beta2.Resource{
+		MemoryBytes: int64(r.MemoryBytes),
+		NanoCPUs:    r.NanoCPUs,
+	}
+}
+
+func fromComposeUpdateConfig(u *composeTypes.UpdateConfig) *v1beta2.UpdateConfig {
+	if u == nil {
+		return nil
+	}
+	return &v1beta2.UpdateConfig{
+		Parallelism: u.Parallelism,
+	}
+}
+
+func fromComposeRestartPolicy(r *composeTypes.RestartPolicy) *v1beta2.RestartPolicy {
+	if r == nil {
+		return nil
+	}
+	return &v1beta2.RestartPolicy{
+		Condition: r.Condition,
+	}
+}
+
+func fromComposeServiceVolumeConfig(vs []composeTypes.ServiceVolumeConfig) []v1beta2.ServiceVolumeConfig {
+	if vs == nil {
+		return nil
+	}
+	volumes := []v1beta2.ServiceVolumeConfig{}
+	for _, v := range vs {
+		volumes = append(volumes, v1beta2.ServiceVolumeConfig{
+			Type:     v.Type,
+			Source:   v.Source,
+			Target:   v.Target,
+			ReadOnly: v.ReadOnly,
+		})
+	}
+	return volumes
+}
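The placement translation above relies on `constraintEquals` to split swarm-style constraint expressions into key, operator, and value before routing them into the typed `Constraints` fields. A standalone illustration of the same regexp (the sample expressions are made up for the example):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern fromComposeConstraints uses in convert.go above.
var constraintEquals = regexp.MustCompile(`([\w\.]*)\W*(==|!=)\W*([\w\.]*)`)

func main() {
	for _, expr := range []string{
		"node.platform.os == linux",
		"node.labels.region != us.east",
	} {
		m := constraintEquals.FindStringSubmatch(expr)
		if len(m) == 4 {
			// m[1] = key, m[2] = operator, m[3] = value
			fmt.Printf("key=%q op=%q value=%q\n", m[1], m[2], m[3])
		}
	}
	// Output:
	// key="node.platform.os" op="==" value="linux"
	// key="node.labels.region" op="!=" value="us.east"
}
```

Expressions that yield fewer than four submatches are silently dropped, which mirrors the `len(matches) == 4` guard in `fromComposeConstraints`.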
@@ -2,15 +2,10 @@ package kubernetes
 
 import (
 	"fmt"
-	"io/ioutil"
-	"path"
 
 	"github.com/docker/cli/cli/command/stack/loader"
 	"github.com/docker/cli/cli/command/stack/options"
-	composetypes "github.com/docker/cli/cli/compose/types"
 	"github.com/pkg/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 )
 
 // RunDeploy is the kubernetes implementation of docker stack deploy
@@ -21,16 +16,6 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
 		return errors.Errorf("Please specify only one compose file (with --compose-file).")
 	}
-
-	// Parse the compose file
-	cfg, err := loader.LoadComposefile(dockerCli, opts)
-	if err != nil {
-		return err
-	}
-	stack, err := LoadStack(opts.Namespace, *cfg)
-	if err != nil {
-		return err
-	}
 
 	// Initialize clients
 	stacks, err := dockerCli.stacks()
 	if err != nil {
@@ -40,6 +25,17 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
 	if err != nil {
 		return err
 	}
+
+	// Parse the compose file
+	cfg, err := loader.LoadComposefile(dockerCli, opts)
+	if err != nil {
+		return err
+	}
+	stack, err := stacks.FromCompose(opts.Namespace, *cfg)
+	if err != nil {
+		return err
+	}
 
 	configMaps := composeClient.ConfigMaps()
 	secrets := composeClient.Secrets()
 	services := composeClient.Services()
@@ -48,93 +44,27 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
 		Pods: pods,
 	}
 
 	// FIXME(vdemeester) handle warnings server-side
-	if err = IsColliding(services, stack, cfg); err != nil {
+	if err := stacks.IsColliding(services, stack); err != nil {
 		return err
 	}
 
-	if err = createFileBasedConfigMaps(stack.Name, cfg.Configs, configMaps); err != nil {
+	if err := stack.createFileBasedConfigMaps(configMaps); err != nil {
 		return err
 	}
 
-	if err = createFileBasedSecrets(stack.Name, cfg.Secrets, secrets); err != nil {
+	if err := stack.createFileBasedSecrets(secrets); err != nil {
 		return err
 	}
 
-	if in, err := stacks.Get(stack.Name, metav1.GetOptions{}); err == nil {
-		in.Spec = stack.Spec
-
-		if _, err = stacks.Update(in); err != nil {
-			return err
-		}
-
-		fmt.Printf("Stack %s was updated\n", stack.Name)
-	} else {
-		if _, err = stacks.Create(stack); err != nil {
-			return err
-		}
-
-		fmt.Fprintf(cmdOut, "Stack %s was created\n", stack.Name)
-	}
+	if err = stacks.CreateOrUpdate(stack); err != nil {
+		return err
+	}
 
 	fmt.Fprintln(cmdOut, "Waiting for the stack to be stable and running...")
 
-	<-watcher.Watch(stack, serviceNames(cfg))
+	<-watcher.Watch(stack.name, stack.getServices())
 
-	fmt.Fprintf(cmdOut, "Stack %s is stable and running\n\n", stack.Name)
-	// TODO: fmt.Fprintf(cmdOut, "Read the logs with:\n $ %s stack logs %s\n", filepath.Base(os.Args[0]), stack.Name)
+	fmt.Fprintf(cmdOut, "Stack %s is stable and running\n\n", stack.name)
 
 	return nil
 }
-
-// createFileBasedConfigMaps creates a Kubernetes ConfigMap for each Compose global file-based config.
-func createFileBasedConfigMaps(stackName string, globalConfigs map[string]composetypes.ConfigObjConfig, configMaps corev1.ConfigMapInterface) error {
-	for name, config := range globalConfigs {
-		if config.File == "" {
-			continue
-		}
-
-		fileName := path.Base(config.File)
-		content, err := ioutil.ReadFile(config.File)
-		if err != nil {
-			return err
-		}
-
-		if _, err := configMaps.Create(toConfigMap(stackName, name, fileName, content)); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func serviceNames(cfg *composetypes.Config) []string {
-	names := []string{}
-
-	for _, service := range cfg.Services {
-		names = append(names, service.Name)
-	}
-
-	return names
-}
-
-// createFileBasedSecrets creates a Kubernetes Secret for each Compose global file-based secret.
-func createFileBasedSecrets(stackName string, globalSecrets map[string]composetypes.SecretConfig, secrets corev1.SecretInterface) error {
-	for name, secret := range globalSecrets {
-		if secret.File == "" {
-			continue
-		}
-
-		fileName := path.Base(secret.File)
-		content, err := ioutil.ReadFile(secret.File)
-		if err != nil {
-			return err
-		}
-
-		if _, err := secrets.Create(toSecret(stackName, name, fileName, content)); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
@@ -5,8 +5,6 @@ import (
 
 	"github.com/docker/cli/cli/command/formatter"
 	"github.com/docker/cli/cli/command/stack/options"
-	"github.com/docker/cli/cli/compose/loader"
-	composetypes "github.com/docker/cli/cli/compose/types"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"vbom.ml/util/sortorder"
 )
@@ -46,29 +44,11 @@ func getStacks(kubeCli *KubeCli) ([]*formatter.Stack, error) {
 		return nil, err
 	}
 	var formattedStacks []*formatter.Stack
-	for _, stack := range stacks.Items {
-		cfg, err := loadStack(stack.Spec.ComposeFile)
-		if err != nil {
-			return nil, err
-		}
+	for _, stack := range stacks {
 		formattedStacks = append(formattedStacks, &formatter.Stack{
-			Name:     stack.Name,
-			Services: len(getServices(cfg)),
+			Name:     stack.name,
+			Services: len(stack.getServices()),
 		})
 	}
 	return formattedStacks, nil
 }
-
-func loadStack(composefile string) (*composetypes.Config, error) {
-	parsed, err := loader.ParseYAML([]byte(composefile))
-	if err != nil {
-		return nil, err
-	}
-	return loader.Load(composetypes.ConfigDetails{
-		ConfigFiles: []composetypes.ConfigFile{
-			{
-				Config: parsed,
-			},
-		},
-	})
-}
@@ -1,24 +0,0 @@
-package kubernetes
-
-import (
-	composetypes "github.com/docker/cli/cli/compose/types"
-	apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
-	yaml "gopkg.in/yaml.v2"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// LoadStack loads a stack from a Compose config, with a given name.
-func LoadStack(name string, cfg composetypes.Config) (*apiv1beta1.Stack, error) {
-	res, err := yaml.Marshal(cfg)
-	if err != nil {
-		return nil, err
-	}
-	return &apiv1beta1.Stack{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-		},
-		Spec: apiv1beta1.StackSpec{
-			ComposeFile: string(res),
-		},
-	}, nil
-}
@@ -1,55 +0,0 @@
-package kubernetes
-
-import (
-	"testing"
-
-	composetypes "github.com/docker/cli/cli/compose/types"
-	apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
-	"github.com/google/go-cmp/cmp"
-	"github.com/gotestyourself/gotestyourself/assert"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func TestLoadStack(t *testing.T) {
-	s, err := LoadStack("foo", composetypes.Config{
-		Version:  "3.1",
-		Filename: "banana",
-		Services: []composetypes.ServiceConfig{
-			{
-				Name:  "foo",
-				Image: "foo",
-			},
-			{
-				Name:  "bar",
-				Image: "bar",
-			},
-		},
-	})
-	assert.NilError(t, err)
-	assert.DeepEqual(t, &apiv1beta1.Stack{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "foo",
-		},
-		Spec: apiv1beta1.StackSpec{
-			ComposeFile: `version: "3.1"
-services:
-  bar:
-    image: bar
-  foo:
-    image: foo
-networks: {}
-volumes: {}
-secrets: {}
-configs: {}
-`,
-		},
-	}, s, cmpKubeAPITime)
-}
-
-// TODO: this can be removed when k8s.io/apimachinery is updated to > 1.9.0
-var cmpKubeAPITime = cmp.Comparer(func(x, y *metav1.Time) bool {
-	if x == nil || y == nil {
-		return x == y
-	}
-	return x.Time.Equal(y.Time)
-})
@@ -32,7 +32,7 @@ func RunPS(dockerCli *KubeCli, options options.PS) error {
 	podsClient := client.Pods()
 
 	// Fetch pods
-	if _, err := stacks.Get(namespace, metav1.GetOptions{}); err != nil {
+	if _, err := stacks.Get(namespace); err != nil {
 		return fmt.Errorf("nothing found in stack: %s", namespace)
 	}
 
@@ -4,7 +4,6 @@ import (
 	"fmt"
 
 	"github.com/docker/cli/cli/command/stack/options"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // RunRemove is the kubernetes implementation of docker stack remove
@@ -15,7 +14,7 @@ func RunRemove(dockerCli *KubeCli, opts options.Remove) error {
 	}
 	for _, stack := range opts.Namespaces {
 		fmt.Fprintf(dockerCli.Out(), "Removing stack: %s\n", stack)
-		err := stacks.Delete(stack, &metav1.DeleteOptions{})
+		err := stacks.Delete(stack)
 		if err != nil {
 			fmt.Fprintf(dockerCli.Out(), "Failed to remove stack %s: %s\n", stack, err)
 			return err
@@ -22,7 +22,7 @@ func RunServices(dockerCli *KubeCli, opts options.Services) error {
 	}
 	replicas := client.ReplicaSets()
 
-	if _, err := stacks.Get(opts.Namespace, metav1.GetOptions{}); err != nil {
+	if _, err := stacks.Get(opts.Namespace); err != nil {
 		fmt.Fprintf(dockerCli.Err(), "Nothing found in stack: %s\n", opts.Namespace)
 		return nil
 	}
@@ -0,0 +1,106 @@
+package kubernetes
+
+import (
+	"io/ioutil"
+	"path"
+	"sort"
+
+	"github.com/docker/cli/kubernetes/compose/v1beta2"
+	"github.com/docker/cli/kubernetes/labels"
+	apiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// stack is the main type used by stack commands so they remain independent from kubernetes compose component version.
+type stack struct {
+	name        string
+	composeFile string
+	spec        *v1beta2.StackSpec
+}
+
+// getServices returns all the stack service names, sorted lexicographically
+func (s *stack) getServices() []string {
+	services := make([]string, len(s.spec.Services))
+	for i, service := range s.spec.Services {
+		services[i] = service.Name
+	}
+	sort.Strings(services)
+	return services
+}
+
+// createFileBasedConfigMaps creates a Kubernetes ConfigMap for each Compose global file-based config.
+func (s *stack) createFileBasedConfigMaps(configMaps corev1.ConfigMapInterface) error {
+	for name, config := range s.spec.Configs {
+		if config.File == "" {
+			continue
+		}
+
+		fileName := path.Base(config.File)
+		content, err := ioutil.ReadFile(config.File)
+		if err != nil {
+			return err
+		}
+
+		if _, err := configMaps.Create(toConfigMap(s.name, name, fileName, content)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// toConfigMap converts a Compose Config to a Kube ConfigMap.
+func toConfigMap(stackName, name, key string, content []byte) *apiv1.ConfigMap {
+	return &apiv1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ConfigMap",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+			Labels: map[string]string{
+				labels.ForStackName: stackName,
+			},
+		},
+		Data: map[string]string{
+			key: string(content),
+		},
+	}
+}
+
+// createFileBasedSecrets creates a Kubernetes Secret for each Compose global file-based secret.
+func (s *stack) createFileBasedSecrets(secrets corev1.SecretInterface) error {
+	for name, secret := range s.spec.Secrets {
+		if secret.File == "" {
+			continue
+		}
+
+		fileName := path.Base(secret.File)
+		content, err := ioutil.ReadFile(secret.File)
+		if err != nil {
+			return err
+		}
+
+		if _, err := secrets.Create(toSecret(s.name, name, fileName, content)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// toSecret converts a Compose Secret to a Kube Secret.
+func toSecret(stackName, name, key string, content []byte) *apiv1.Secret {
+	return &apiv1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+			Labels: map[string]string{
+				labels.ForStackName: stackName,
+			},
+		},
+		Data: map[string][]byte{
+			key: content,
+		},
+	}
+}
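Both helpers above skip entries whose `File` is empty, so externally managed configs and secrets are never materialized client-side. A reduced standalone sketch of that guard, with deliberately simplified stand-in types (the names here are hypothetical):

```go
package main

import "fmt"

// fileConfig is a cut-down stand-in for the compose config/secret entries
// consumed by createFileBasedConfigMaps and createFileBasedSecrets.
type fileConfig struct {
	File string // empty for external configs/secrets
}

// fileBased keeps only the entries that reference a local file,
// mirroring the `config.File == ""` guard in stack.go above.
func fileBased(all map[string]fileConfig) []string {
	var names []string
	for name, c := range all {
		if c.File == "" {
			continue // external: nothing to create in the cluster
		}
		names = append(names, name)
	}
	return names
}

func main() {
	fmt.Println(fileBased(map[string]fileConfig{
		"app-config": {File: "./config.yml"},
		"tls-cert":   {}, // external, skipped
	})) // prints: [app-config]
}
```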
@@ -0,0 +1,177 @@
+package kubernetes
+
+import (
+	"fmt"
+
+	composetypes "github.com/docker/cli/cli/compose/types"
+	composev1beta1 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1"
+	composev1beta2 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2"
+	"github.com/docker/cli/kubernetes/labels"
+	yaml "gopkg.in/yaml.v2"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/rest"
+)
+
+// stackClient talks to a kubernetes compose component.
+type stackClient interface {
+	CreateOrUpdate(s stack) error
+	Delete(name string) error
+	Get(name string) (stack, error)
+	List(opts metav1.ListOptions) ([]stack, error)
+	IsColliding(servicesClient corev1.ServiceInterface, s stack) error
+	FromCompose(name string, cfg composetypes.Config) (stack, error)
+}
+
+// stackV1Beta1 implements stackClient interface and talks to compose component v1beta1.
+type stackV1Beta1 struct {
+	stacks composev1beta1.StackInterface
+}
+
+func newStackV1Beta1(config *rest.Config, namespace string) (stackClient, error) {
+	client, err := composev1beta1.NewForConfig(config)
+	if err != nil {
+		return nil, err
+	}
+	return &stackV1Beta1{stacks: client.Stacks(namespace)}, nil
+}
+
+func (s *stackV1Beta1) CreateOrUpdate(internalStack stack) error {
+	// If it already exists, update the stack
+	if stackBeta1, err := s.stacks.Get(internalStack.name, metav1.GetOptions{}); err == nil {
+		stackBeta1.Spec.ComposeFile = internalStack.composeFile
+		_, err := s.stacks.Update(stackBeta1)
+		return err
+	}
+	// Or create it
+	_, err := s.stacks.Create(stackToV1beta1(internalStack))
+	return err
+}
+
+func (s *stackV1Beta1) Delete(name string) error {
+	return s.stacks.Delete(name, &metav1.DeleteOptions{})
+}
+
+func (s *stackV1Beta1) Get(name string) (stack, error) {
+	stackBeta1, err := s.stacks.Get(name, metav1.GetOptions{})
+	if err != nil {
+		return stack{}, err
+	}
+	return stackFromV1beta1(stackBeta1)
+}
+
+func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]stack, error) {
+	list, err := s.stacks.List(opts)
+	if err != nil {
+		return nil, err
+	}
+	stacks := make([]stack, len(list.Items))
+	for i := range list.Items {
+		stack, err := stackFromV1beta1(&list.Items[i])
+		if err != nil {
+			return nil, err
+		}
+		stacks[i] = stack
+	}
+	return stacks, nil
+}
+
+// IsColliding verifies that services defined in the stack collide with already deployed services
+func (s *stackV1Beta1) IsColliding(servicesClient corev1.ServiceInterface, st stack) error {
+	for _, srv := range st.getServices() {
+		if err := verify(servicesClient, st.name, srv); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// verify checks whether the service is already present in kubernetes.
+// If we find the service by name but it doesn't have our label or it has a different value
+// than the stack name for the label, we fail (i.e. it will collide)
+func verify(services corev1.ServiceInterface, stackName string, service string) error {
+	svc, err := services.Get(service, metav1.GetOptions{})
+	if err == nil {
+		if key, ok := svc.ObjectMeta.Labels[labels.ForStackName]; ok {
+			if key != stackName {
+				return fmt.Errorf("service %s already present in stack named %s", service, key)
+			}
+			return nil
+		}
+		return fmt.Errorf("service %s already present in the cluster", service)
+	}
+	return nil
+}
+
+func (s *stackV1Beta1) FromCompose(name string, cfg composetypes.Config) (stack, error) {
+	res, err := yaml.Marshal(cfg)
+	if err != nil {
+		return stack{}, err
+	}
+	return stack{
+		name:        name,
+		composeFile: string(res),
+		spec:        fromComposeConfig(&cfg),
+	}, nil
+}
+
+// stackV1Beta2 implements stackClient interface and talks to compose component v1beta2.
+type stackV1Beta2 struct {
+	stacks composev1beta2.StackInterface
+}
+
+func newStackV1Beta2(config *rest.Config, namespace string) (stackClient, error) {
+	client, err := composev1beta2.NewForConfig(config)
+	if err != nil {
+		return nil, err
+	}
+	return &stackV1Beta2{stacks: client.Stacks(namespace)}, nil
+}
+
+func (s *stackV1Beta2) CreateOrUpdate(internalStack stack) error {
+	// If it already exists, update the stack
+	if stackBeta2, err := s.stacks.Get(internalStack.name, metav1.GetOptions{}); err == nil {
+		stackBeta2.Spec = internalStack.spec
+		_, err := s.stacks.Update(stackBeta2)
+		return err
+	}
+	// Or create it
+	_, err := s.stacks.Create(stackToV1beta2(internalStack))
+	return err
+}
+
+func (s *stackV1Beta2) Delete(name string) error {
+	return s.stacks.Delete(name, &metav1.DeleteOptions{})
+}
+
+func (s *stackV1Beta2) Get(name string) (stack, error) {
+	stackBeta2, err := s.stacks.Get(name, metav1.GetOptions{})
+	if err != nil {
+		return stack{}, err
+	}
+	return stackFromV1beta2(stackBeta2), nil
+}
+
+func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]stack, error) {
+	list, err := s.stacks.List(opts)
+	if err != nil {
+		return nil, err
+	}
+	stacks := make([]stack, len(list.Items))
+	for i := range list.Items {
+		stacks[i] = stackFromV1beta2(&list.Items[i])
+	}
+	return stacks, nil
+}
+
+// IsColliding is handled server-side with the compose api v1beta2, so nothing to do here
+func (s *stackV1Beta2) IsColliding(servicesClient corev1.ServiceInterface, st stack) error {
+	return nil
+}
+
+func (s *stackV1Beta2) FromCompose(name string, cfg composetypes.Config) (stack, error) {
+	return stack{
+		name: name,
+		spec: fromComposeConfig(&cfg),
+	}, nil
+}
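Both implementations expose the same get-then-update-else-create upsert through `CreateOrUpdate`; only the payload differs (v1beta1 replaces the serialized compose file, v1beta2 replaces the structured spec). A reduced sketch of that shape, with an in-memory map standing in for the stacks API:

```go
package main

import "fmt"

// store stands in for the versioned stacks REST client in this sketch.
type store map[string]string

// createOrUpdate mirrors the upsert shape of stackV1Beta1/stackV1Beta2:
// fetch the current object, replace its spec if it exists, otherwise create it.
func (s store) createOrUpdate(name, spec string) string {
	if _, ok := s[name]; ok {
		s[name] = spec
		return "updated"
	}
	s[name] = spec
	return "created"
}

func main() {
	s := store{}
	fmt.Println(s.createOrUpdate("mystack", "v1")) // created
	fmt.Println(s.createOrUpdate("mystack", "v2")) // updated
}
```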
@@ -0,0 +1,39 @@
+package kubernetes
+
+import (
+	"testing"
+
+	composetypes "github.com/docker/cli/cli/compose/types"
+	"github.com/gotestyourself/gotestyourself/assert"
+)
+
+func TestFromCompose(t *testing.T) {
+	stackClient := &stackV1Beta1{}
+	s, err := stackClient.FromCompose("foo", composetypes.Config{
+		Version:  "3.1",
+		Filename: "banana",
+		Services: []composetypes.ServiceConfig{
+			{
+				Name:  "foo",
+				Image: "foo",
+			},
+			{
+				Name:  "bar",
+				Image: "bar",
+			},
+		},
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, "foo", s.name)
+	assert.Equal(t, string(`version: "3.1"
+services:
+  bar:
+    image: bar
+  foo:
+    image: foo
+networks: {}
+volumes: {}
+secrets: {}
+configs: {}
+`), s.composeFile)
+}
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"time"
 
-	apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
 	"github.com/docker/cli/kubernetes/labels"
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -17,10 +16,10 @@ type DeployWatcher struct {
 }
 
 // Watch watches a stack deployment and returns a chan that holds the state of the stack
-func (w DeployWatcher) Watch(stack *apiv1beta1.Stack, serviceNames []string) chan bool {
+func (w DeployWatcher) Watch(name string, serviceNames []string) chan bool {
 	stop := make(chan bool)
 
-	go w.waitForPods(stack.Name, serviceNames, stop)
+	go w.waitForPods(name, serviceNames, stop)
 
 	return stop
 }
@@ -2,6 +2,7 @@ package kubernetes
 
 import (
 	apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
+	apiv1beta2 "github.com/docker/cli/kubernetes/compose/v1beta2"
 	"github.com/pkg/errors"
 	apimachinerymetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -15,6 +16,8 @@ type StackVersion string
 const (
 	// StackAPIV1Beta1 is returned if it's the most recent version available.
 	StackAPIV1Beta1 = StackVersion("v1beta1")
+	// StackAPIV1Beta2 is returned if it's the most recent version available.
+	StackAPIV1Beta2 = StackVersion("v1beta2")
 )
 
 // GetStackAPIVersion returns the most recent stack API installed.
@@ -29,6 +32,8 @@ func GetStackAPIVersion(clientSet *kubernetes.Clientset) (StackVersion, error) {
 
 func getAPIVersion(groups *metav1.APIGroupList) (StackVersion, error) {
 	switch {
+	case findVersion(apiv1beta2.SchemeGroupVersion, groups.Groups):
+		return StackAPIV1Beta2, nil
 	case findVersion(apiv1beta1.SchemeGroupVersion, groups.Groups):
 		return StackAPIV1Beta1, nil
 	default:
@@ -17,6 +17,8 @@ func TestGetStackAPIVersion(t *testing.T) {
 	}{
 		{"no stack api", makeGroups(), true, ""},
 		{"v1beta1", makeGroups(groupVersion{"compose.docker.com", []string{"v1beta1"}}), false, StackAPIV1Beta1},
+		{"v1beta2", makeGroups(groupVersion{"compose.docker.com", []string{"v1beta2"}}), false, StackAPIV1Beta2},
+		{"most recent has precedence", makeGroups(groupVersion{"compose.docker.com", []string{"v1beta1", "v1beta2"}}), false, StackAPIV1Beta2},
 	}
 
 	for _, test := range tests {
@@ -1,27 +1,38 @@
 package clientset
 
 import (
-	composev1beta1 "github.com/docker/cli/kubernetes/client/clientset_generated/clientset/typed/compose/v1beta1"
+	composev1beta1 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1"
+	composev1beta2 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2"
 	glog "github.com/golang/glog"
 	discovery "k8s.io/client-go/discovery"
 	rest "k8s.io/client-go/rest"
 	flowcontrol "k8s.io/client-go/util/flowcontrol"
 )
 
+// Interface defines the methods a compose kube client should have
+// FIXME(vdemeester) is it required ?
 type Interface interface {
 	Discovery() discovery.DiscoveryInterface
+	ComposeV1beta2() composev1beta2.ComposeV1beta2Interface
 	ComposeV1beta1() composev1beta1.ComposeV1beta1Interface
-	// Deprecated: please explicitly pick a version if possible.
-	Compose() composev1beta1.ComposeV1beta1Interface
 }
 
 // Clientset contains the clients for groups. Each group has exactly one
 // version included in a Clientset.
 type Clientset struct {
 	*discovery.DiscoveryClient
+	*composev1beta2.ComposeV1beta2Client
 	*composev1beta1.ComposeV1beta1Client
 }
 
+// ComposeV1beta2 retrieves the ComposeV1beta2Client
+func (c *Clientset) ComposeV1beta2() composev1beta2.ComposeV1beta2Interface {
+	if c == nil {
+		return nil
+	}
+	return c.ComposeV1beta2Client
+}
+
 // ComposeV1beta1 retrieves the ComposeV1beta1Client
 func (c *Clientset) ComposeV1beta1() composev1beta1.ComposeV1beta1Interface {
 	if c == nil {
@@ -30,15 +41,6 @@ func (c *Clientset) ComposeV1beta1() composev1beta1.ComposeV1beta1Interface {
 	return c.ComposeV1beta1Client
 }
 
-// Deprecated: Compose retrieves the default version of ComposeClient.
-// Please explicitly pick a version.
-func (c *Clientset) Compose() composev1beta1.ComposeV1beta1Interface {
-	if c == nil {
-		return nil
-	}
-	return c.ComposeV1beta1Client
-}
-
 // Discovery retrieves the DiscoveryClient
 func (c *Clientset) Discovery() discovery.DiscoveryInterface {
 	if c == nil {
@@ -55,6 +57,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
 	}
 	var cs Clientset
 	var err error
+	cs.ComposeV1beta2Client, err = composev1beta2.NewForConfig(&configShallowCopy)
+	if err != nil {
+		return nil, err
+	}
 	cs.ComposeV1beta1Client, err = composev1beta1.NewForConfig(&configShallowCopy)
 	if err != nil {
 		return nil, err
@@ -72,6 +78,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
 // panics if there is an error in the config.
 func NewForConfigOrDie(c *rest.Config) *Clientset {
 	var cs Clientset
+	cs.ComposeV1beta2Client = composev1beta2.NewForConfigOrDie(c)
 	cs.ComposeV1beta1Client = composev1beta1.NewForConfigOrDie(c)
 
 	cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
@@ -81,6 +88,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
 // New creates a new Clientset for the given RESTClient.
 func New(c rest.Interface) *Clientset {
 	var cs Clientset
+	cs.ComposeV1beta2Client = composev1beta2.New(c)
 	cs.ComposeV1beta1Client = composev1beta1.New(c)
 
 	cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
@@ -2,15 +2,19 @@ package scheme
 
 import (
 	composev1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
+	composev1beta2 "github.com/docker/cli/kubernetes/compose/v1beta2"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
 )
 
-var Scheme = runtime.NewScheme()
-var Codecs = serializer.NewCodecFactory(Scheme)
-var ParameterCodec = runtime.NewParameterCodec(Scheme)
+// Variables required for registration
+var (
+	Scheme         = runtime.NewScheme()
+	Codecs         = serializer.NewCodecFactory(Scheme)
+	ParameterCodec = runtime.NewParameterCodec(Scheme)
+)
 
 func init() {
 	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
@@ -32,6 +36,6 @@ func init() {
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.
 func AddToScheme(scheme *runtime.Scheme) {
+	composev1beta2.AddToScheme(scheme)
 	composev1beta1.AddToScheme(scheme)
-
 }
@@ -1,12 +1,13 @@
 package v1beta1
 
 import (
-	"github.com/docker/cli/kubernetes/client/clientset_generated/clientset/scheme"
+	"github.com/docker/cli/kubernetes/client/clientset/scheme"
 	v1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
 	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
 	rest "k8s.io/client-go/rest"
 )
 
+// ComposeV1beta1Interface defines the methods a compose v1beta1 client has
 type ComposeV1beta1Interface interface {
 	RESTClient() rest.Interface
 	StacksGetter
@@ -17,6 +18,7 @@ type ComposeV1beta1Client struct {
 	restClient rest.Interface
 }
 
+// Stacks returns a stack client
 func (c *ComposeV1beta1Client) Stacks(namespace string) StackInterface {
 	return newStacks(c, namespace)
 }
@@ -1,7 +1,7 @@
 package v1beta1
 
 import (
-	scheme "github.com/docker/cli/kubernetes/client/clientset_generated/clientset/scheme"
+	scheme "github.com/docker/cli/kubernetes/client/clientset/scheme"
 	v1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
@@ -25,8 +25,7 @@ type StackInterface interface {
 	Get(name string, options v1.GetOptions) (*v1beta1.Stack, error)
 	List(opts v1.ListOptions) (*v1beta1.StackList, error)
 	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Stack, err error)
-	StackExpansion
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta1.Stack, error)
 }
 
+var _ StackInterface = &stacks{}
@@ -46,36 +45,36 @@ func newStacks(c *ComposeV1beta1Client, namespace string) *stacks {
 }
 
 // Create takes the representation of a stack and creates it. Returns the server's representation of the stack, and an error, if there is any.
-func (c *stacks) Create(stack *v1beta1.Stack) (result *v1beta1.Stack, err error) {
-	result = &v1beta1.Stack{}
-	err = c.client.Post().
+func (c *stacks) Create(stack *v1beta1.Stack) (*v1beta1.Stack, error) {
+	result := &v1beta1.Stack{}
+	err := c.client.Post().
 		Namespace(c.ns).
 		Resource("stacks").
 		Body(stack).
 		Do().
 		Into(result)
-	return
+	return result, err
 }
 
 // Update takes the representation of a stack and updates it. Returns the server's representation of the stack, and an error, if there is any.
-func (c *stacks) Update(stack *v1beta1.Stack) (result *v1beta1.Stack, err error) {
-	result = &v1beta1.Stack{}
-	err = c.client.Put().
+func (c *stacks) Update(stack *v1beta1.Stack) (*v1beta1.Stack, error) {
+	result := &v1beta1.Stack{}
+	err := c.client.Put().
 		Namespace(c.ns).
 		Resource("stacks").
 		Name(stack.Name).
 		Body(stack).
 		Do().
 		Into(result)
-	return
+	return result, err
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus().
-func (c *stacks) UpdateStatus(stack *v1beta1.Stack) (result *v1beta1.Stack, err error) {
-	result = &v1beta1.Stack{}
-	err = c.client.Put().
+func (c *stacks) UpdateStatus(stack *v1beta1.Stack) (*v1beta1.Stack, error) {
+	result := &v1beta1.Stack{}
+	err := c.client.Put().
 		Namespace(c.ns).
 		Resource("stacks").
 		Name(stack.Name).
@@ -83,7 +82,7 @@ func (c *stacks) UpdateStatus(stack *v1beta1.Stack) (result *v1beta1.Stack, err
 		Body(stack).
 		Do().
 		Into(result)
-	return
+	return result, err
 }
 
 // Delete takes name of the stack and deletes it. Returns an error if one occurs.
@@ -109,28 +108,28 @@ func (c *stacks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.List
 }
 
 // Get takes name of the stack, and returns the corresponding stack object, and an error if there is any.
-func (c *stacks) Get(name string, options v1.GetOptions) (result *v1beta1.Stack, err error) {
-	result = &v1beta1.Stack{}
-	err = c.client.Get().
+func (c *stacks) Get(name string, options v1.GetOptions) (*v1beta1.Stack, error) {
+	result := &v1beta1.Stack{}
+	err := c.client.Get().
 		Namespace(c.ns).
 		Resource("stacks").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
 		Do().
 		Into(result)
-	return
+	return result, err
 }
 
 // List takes label and field selectors, and returns the list of Stacks that match those selectors.
-func (c *stacks) List(opts v1.ListOptions) (result *v1beta1.StackList, err error) {
-	result = &v1beta1.StackList{}
-	err = c.client.Get().
+func (c *stacks) List(opts v1.ListOptions) (*v1beta1.StackList, error) {
+	result := &v1beta1.StackList{}
+	err := c.client.Get().
 		Namespace(c.ns).
 		Resource("stacks").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Do().
 		Into(result)
-	return
+	return result, err
 }
 
 // Watch returns a watch.Interface that watches the requested stacks.
@@ -144,9 +143,9 @@ func (c *stacks) Watch(opts v1.ListOptions) (watch.Interface, error) {
 }
 
 // Patch applies the patch and returns the patched stack.
-func (c *stacks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Stack, err error) {
-	result = &v1beta1.Stack{}
-	err = c.client.Patch(pt).
+func (c *stacks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta1.Stack, error) {
+	result := &v1beta1.Stack{}
+	err := c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("stacks").
 		SubResource(subresources...).
@@ -154,5 +153,5 @@ func (c *stacks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta1.Stack, error) {
 		Body(data).
 		Do().
 		Into(result)
-	return
+	return result, err
 }
@@ -0,0 +1,74 @@
+package v1beta2
+
+import (
+	"github.com/docker/cli/kubernetes/client/clientset/scheme"
+	v1beta2 "github.com/docker/cli/kubernetes/compose/v1beta2"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	rest "k8s.io/client-go/rest"
+)
+
+// ComposeV1beta2Interface defines the methods a compose v1beta2 client has
+type ComposeV1beta2Interface interface {
+	RESTClient() rest.Interface
+	StacksGetter
+}
+
+// ComposeV1beta2Client is used to interact with features provided by the compose.docker.com group.
+type ComposeV1beta2Client struct {
+	restClient rest.Interface
+}
+
+// Stacks returns a stack client
+func (c *ComposeV1beta2Client) Stacks(namespace string) StackInterface {
+	return newStacks(c, namespace)
+}
+
+// NewForConfig creates a new ComposeV1beta2Client for the given config.
+func NewForConfig(c *rest.Config) (*ComposeV1beta2Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &ComposeV1beta2Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ComposeV1beta2Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ComposeV1beta2Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new ComposeV1beta2Client for the given RESTClient.
+func New(c rest.Interface) *ComposeV1beta2Client {
+	return &ComposeV1beta2Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1beta2.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *ComposeV1beta2Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
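For context, a hedged usage sketch of this typed client. The kubeconfig path and namespace are illustrative assumptions; `NewForConfig`, `Stacks`, and `List` are the calls defined above, and `clientcmd.BuildConfigFromFlags` is the standard client-go helper for obtaining a `*rest.Config`:

```go
package main

import (
	"fmt"

	composev1beta2 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; adjust for your environment.
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	client, err := composev1beta2.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// List every v1beta2 Stack in the "default" namespace.
	list, err := client.Stacks("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range list.Items {
		fmt.Println(s.Name)
	}
}
```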
@@ -0,0 +1,155 @@
+package v1beta2
+
+import (
+	scheme "github.com/docker/cli/kubernetes/client/clientset/scheme"
+	v1beta2 "github.com/docker/cli/kubernetes/compose/v1beta2"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	rest "k8s.io/client-go/rest"
+)
+
+// StacksGetter has a method to return a StackInterface.
+// A group's client should implement this interface.
+type StacksGetter interface {
+	Stacks(namespace string) StackInterface
+}
+
+// StackInterface has methods to work with Stack resources.
+type StackInterface interface {
+	Create(*v1beta2.Stack) (*v1beta2.Stack, error)
+	Update(*v1beta2.Stack) (*v1beta2.Stack, error)
+	UpdateStatus(*v1beta2.Stack) (*v1beta2.Stack, error)
+	Delete(name string, options *v1.DeleteOptions) error
+	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+	Get(name string, options v1.GetOptions) (*v1beta2.Stack, error)
+	List(opts v1.ListOptions) (*v1beta2.StackList, error)
+	Watch(opts v1.ListOptions) (watch.Interface, error)
+	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta2.Stack, error)
+}
+
+// stacks implements StackInterface
+type stacks struct {
+	client rest.Interface
+	ns     string
+}
+
+// newStacks returns a Stacks
+func newStacks(c *ComposeV1beta2Client, namespace string) *stacks {
+	return &stacks{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Create takes the representation of a stack and creates it. Returns the server's representation of the stack, and an error, if there is any.
+func (c *stacks) Create(stack *v1beta2.Stack) (*v1beta2.Stack, error) {
+	result := &v1beta2.Stack{}
+	err := c.client.Post().
+		Namespace(c.ns).
+		Resource("stacks").
+		Body(stack).
+		Do().
+		Into(result)
+	return result, err
+}
+
+// Update takes the representation of a stack and updates it. Returns the server's representation of the stack, and an error, if there is any.
+func (c *stacks) Update(stack *v1beta2.Stack) (*v1beta2.Stack, error) {
+	result := &v1beta2.Stack{}
+	err := c.client.Put().
+		Namespace(c.ns).
+		Resource("stacks").
+		Name(stack.Name).
+		Body(stack).
+		Do().
+		Into(result)
+	return result, err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus().
+func (c *stacks) UpdateStatus(stack *v1beta2.Stack) (*v1beta2.Stack, error) {
+	result := &v1beta2.Stack{}
+	err := c.client.Put().
+		Namespace(c.ns).
+		Resource("stacks").
+		Name(stack.Name).
+		SubResource("status").
+		Body(stack).
+		Do().
+		Into(result)
+	return result, err
+}
+
+// Delete takes name of the stack and deletes it. Returns an error if one occurs.
+func (c *stacks) Delete(name string, options *v1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("stacks").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *stacks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("stacks").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Get takes name of the stack, and returns the corresponding stack object, and an error if there is any.
+func (c *stacks) Get(name string, options v1.GetOptions) (*v1beta2.Stack, error) {
+	result := &v1beta2.Stack{}
+	err := c.client.Get().
+		Namespace(c.ns).
+		Resource("stacks").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return result, err
+}
+
+// List takes label and field selectors, and returns the list of Stacks that match those selectors.
+func (c *stacks) List(opts v1.ListOptions) (*v1beta2.StackList, error) {
+	result := &v1beta2.StackList{}
+	err := c.client.Get().
+		Namespace(c.ns).
+		Resource("stacks").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Do().
+		Into(result)
+	return result, err
+}
+
+// Watch returns a watch.Interface that watches the requested stacks.
+func (c *stacks) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("stacks").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Watch()
+}
+
+// Patch applies the patch and returns the patched stack.
+func (c *stacks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta2.Stack, error) {
+	result := &v1beta2.Stack{}
+	err := c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("stacks").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return result, err
+}
@ -1,4 +0,0 @@
|
|||
// This package is generated by client-gen with custom arguments.
|
||||
|
||||
// This package has the automatically generated clientset.
|
||||
package clientset
|
|
@ -1,4 +0,0 @@
|
|||
// This package is generated by client-gen with custom arguments.
|
||||
|
||||
// This package contains the scheme of the automatically generated clientset.
|
||||
package scheme
|
|
@ -1,4 +0,0 @@
// This package is generated by client-gen with custom arguments.

// This package has the automatically generated typed clients.
package v1beta1
@ -1,3 +0,0 @@
package v1beta1

type StackExpansion interface{}
@ -0,0 +1,25 @@
package compose

import (
	"github.com/docker/cli/kubernetes/client/informers/compose/v1beta2"
	"github.com/docker/cli/kubernetes/client/informers/internalinterfaces"
)

// Interface provides access to each of this group's versions.
type Interface interface {
	V1beta2() v1beta2.Interface
}

type group struct {
	internalinterfaces.SharedInformerFactory
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory) Interface {
	return &group{f}
}

// V1beta2 returns a new v1beta2.Interface.
func (g *group) V1beta2() v1beta2.Interface {
	return v1beta2.New(g.SharedInformerFactory)
}
@ -0,0 +1,25 @@
package v1beta2

import (
	"github.com/docker/cli/kubernetes/client/informers/internalinterfaces"
)

// Interface provides access to all the informers in this group version.
type Interface interface {
	// Stacks returns a StackInformer.
	Stacks() StackInformer
}

type version struct {
	internalinterfaces.SharedInformerFactory
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory) Interface {
	return &version{f}
}

// Stacks returns a StackInformer.
func (v *version) Stacks() StackInformer {
	return &stackInformer{factory: v.SharedInformerFactory}
}
@ -0,0 +1,51 @@
package v1beta2

import (
	"time"

	"github.com/docker/cli/kubernetes/client/clientset"
	"github.com/docker/cli/kubernetes/client/informers/internalinterfaces"
	"github.com/docker/cli/kubernetes/client/listers/compose/v1beta2"
	compose_v1beta2 "github.com/docker/cli/kubernetes/compose/v1beta2"
	"k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

// StackInformer provides access to a shared informer and lister for
// Stacks.
type StackInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1beta2.StackLister
}

type stackInformer struct {
	factory internalinterfaces.SharedInformerFactory
}

func newStackInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.ComposeV1beta2().Stacks(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.ComposeV1beta2().Stacks(v1.NamespaceAll).Watch(options)
			},
		},
		&compose_v1beta2.Stack{},
		resyncPeriod,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	return sharedIndexInformer
}

func (f *stackInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&compose_v1beta2.Stack{}, newStackInformer)
}

func (f *stackInformer) Lister() v1beta2.StackLister {
	return v1beta2.NewStackLister(f.Informer().GetIndexer())
}
@ -0,0 +1,101 @@
package informers

import (
	"reflect"
	"sync"
	"time"

	"github.com/docker/cli/kubernetes/client/clientset"
	"github.com/docker/cli/kubernetes/client/informers/compose"
	"github.com/docker/cli/kubernetes/client/informers/internalinterfaces"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/tools/cache"
)

type sharedInformerFactory struct {
	client        clientset.Interface
	lock          sync.Mutex
	defaultResync time.Duration

	informers map[reflect.Type]cache.SharedIndexInformer
	// startedInformers is used for tracking which informers have been started.
	// This allows Start() to be called multiple times safely.
	startedInformers map[reflect.Type]bool
}

// NewSharedInformerFactory constructs a new instance of sharedInformerFactory
func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Duration) SharedInformerFactory {
	return &sharedInformerFactory{
		client:           client,
		defaultResync:    defaultResync,
		informers:        make(map[reflect.Type]cache.SharedIndexInformer),
		startedInformers: make(map[reflect.Type]bool),
	}
}

// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
	f.lock.Lock()
	defer f.lock.Unlock()

	for informerType, informer := range f.informers {
		if !f.startedInformers[informerType] {
			go informer.Run(stopCh)
			f.startedInformers[informerType] = true
		}
	}
}

// WaitForCacheSync waits until the caches of all started informers are synced.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
	informers := func() map[reflect.Type]cache.SharedIndexInformer {
		f.lock.Lock()
		defer f.lock.Unlock()

		informers := map[reflect.Type]cache.SharedIndexInformer{}
		for informerType, informer := range f.informers {
			if f.startedInformers[informerType] {
				informers[informerType] = informer
			}
		}
		return informers
	}()

	res := map[reflect.Type]bool{}
	for informType, informer := range informers {
		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
	}
	return res
}

// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(obj)
	informer, exists := f.informers[informerType]
	if exists {
		return informer
	}
	informer = newFunc(f.client, f.defaultResync)
	f.informers[informerType] = informer

	return informer
}

// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
	internalinterfaces.SharedInformerFactory
	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool

	Compose() compose.Interface
}

func (f *sharedInformerFactory) Compose() compose.Interface {
	return compose.New(f)
}
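Editor's note: a hedged sketch of wiring this factory end to end — register the informer, start it, wait for the cache to sync, then read from the lister. The construction mirrors the code above; the resync interval and kubeconfig path are arbitrary example values, and clientset.NewForConfig is assumed as in standard client-gen output.

package main

import (
	"fmt"
	"time"

	"github.com/docker/cli/kubernetes/client/clientset"
	"github.com/docker/cli/kubernetes/client/informers"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config") // placeholder path
	if err != nil {
		panic(err)
	}
	cs, err := clientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	factory := informers.NewSharedInformerFactory(cs, 30*time.Second)
	stacks := factory.Compose().V1beta2().Stacks()
	stacks.Informer() // registers the stack informer with the factory before Start

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)            // runs every informer registered so far
	factory.WaitForCacheSync(stopCh) // blocks until the initial List/Watch has populated the cache

	// Reads now come from the local cache instead of the API server.
	all, err := stacks.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("stacks in cache:", len(all))
}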
@ -0,0 +1,44 @@
package informers

import (
	"fmt"

	"github.com/docker/cli/kubernetes/compose/v1beta2"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/tools/cache"
)

// GenericInformer is a type of SharedIndexInformer which will locate and delegate to other
// sharedInformers based on type
type GenericInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() cache.GenericLister
}

type genericInformer struct {
	informer cache.SharedIndexInformer
	resource schema.GroupResource
}

// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() cache.SharedIndexInformer {
	return f.informer
}

// Lister returns the GenericLister.
func (f *genericInformer) Lister() cache.GenericLister {
	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
}

// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
	switch resource {
	// Group=Compose, Version=V1beta2
	case v1beta2.SchemeGroupVersion.WithResource("stacks"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Compose().V1beta2().Stacks().Informer()}, nil

	}

	return nil, fmt.Errorf("no informer found for %v", resource)
}
@ -0,0 +1,18 @@
package internalinterfaces

import (
	"time"

	"github.com/docker/cli/kubernetes/client/clientset"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/cache"
)

// NewInformerFunc defines an Informer constructor (from a clientset and a duration)
type NewInformerFunc func(clientset.Interface, time.Duration) cache.SharedIndexInformer

// SharedInformerFactory is a small interface that allows adding an informer without an import cycle
type SharedInformerFactory interface {
	Start(stopCh <-chan struct{})
	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}
@ -0,0 +1,9 @@
package v1beta2

// StackListerExpansion allows custom methods to be added to
// StackLister.
type StackListerExpansion interface{}

// StackNamespaceListerExpansion allows custom methods to be added to
// StackNamespaceLister.
type StackNamespaceListerExpansion interface{}
@ -0,0 +1,78 @@
package v1beta2

import (
	"github.com/docker/cli/kubernetes/compose/v1beta2"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// StackLister helps list Stacks.
type StackLister interface {
	// List lists all Stacks in the indexer.
	List(selector labels.Selector) ([]*v1beta2.Stack, error)
	// Stacks returns an object that can list and get Stacks.
	Stacks(namespace string) StackNamespaceLister
	StackListerExpansion
}

// stackLister implements the StackLister interface.
type stackLister struct {
	indexer cache.Indexer
}

// NewStackLister returns a new StackLister.
func NewStackLister(indexer cache.Indexer) StackLister {
	return &stackLister{indexer: indexer}
}

// List lists all Stacks in the indexer.
func (s *stackLister) List(selector labels.Selector) ([]*v1beta2.Stack, error) {
	stacks := []*v1beta2.Stack{}
	err := cache.ListAll(s.indexer, selector, func(m interface{}) {
		stacks = append(stacks, m.(*v1beta2.Stack))
	})
	return stacks, err
}

// Stacks returns an object that can list and get Stacks.
func (s *stackLister) Stacks(namespace string) StackNamespaceLister {
	return stackNamespaceLister{indexer: s.indexer, namespace: namespace}
}

// StackNamespaceLister helps list and get Stacks.
type StackNamespaceLister interface {
	// List lists all Stacks in the indexer for a given namespace.
	List(selector labels.Selector) ([]*v1beta2.Stack, error)
	// Get retrieves the Stack from the indexer for a given namespace and name.
	Get(name string) (*v1beta2.Stack, error)
	StackNamespaceListerExpansion
}

// stackNamespaceLister implements the StackNamespaceLister
// interface.
type stackNamespaceLister struct {
	indexer   cache.Indexer
	namespace string
}

// List lists all Stacks in the indexer for a given namespace.
func (s stackNamespaceLister) List(selector labels.Selector) ([]*v1beta2.Stack, error) {
	stacks := []*v1beta2.Stack{}
	err := cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		stacks = append(stacks, m.(*v1beta2.Stack))
	})
	return stacks, err
}

// Get retrieves the Stack from the indexer for a given namespace and name.
func (s stackNamespaceLister) Get(name string) (*v1beta2.Stack, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1beta2.GroupResource("stack"), name)
	}
	return obj.(*v1beta2.Stack), nil
}
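Editor's note: the read path of this lister in practice — a small sketch, assuming the indexer has been fed by the shared informer above. apierrors.IsNotFound matches the errors.NewNotFound value that Get returns, so "absent from cache" can be distinguished from real failures.

package example

import (
	listers "github.com/docker/cli/kubernetes/client/listers/compose/v1beta2"
	"github.com/docker/cli/kubernetes/compose/v1beta2"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// lookupStack reads a stack from the informer-fed cache; a nil, nil return
// means the stack is simply absent from the cache.
func lookupStack(lister listers.StackLister, ns, name string) (*v1beta2.Stack, error) {
	stack, err := lister.Stacks(ns).Get(name)
	if apierrors.IsNotFound(err) {
		return nil, nil
	}
	return stack, err
}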
@ -0,0 +1,25 @@
package clone

// MapOfStringToSliceOfString deep copies a map[string][]string
func MapOfStringToSliceOfString(source map[string][]string) map[string][]string {
	if source == nil {
		return nil
	}
	res := make(map[string][]string, len(source))
	for k, v := range source {
		res[k] = SliceOfString(v)
	}
	return res
}

// MapOfStringToInt deep copies a map[string]int
func MapOfStringToInt(source map[string]int) map[string]int {
	if source == nil {
		return nil
	}
	res := make(map[string]int, len(source))
	for k, v := range source {
		res[k] = v
	}
	return res
}
@ -0,0 +1,11 @@
package clone

// SliceOfString deep copies a slice of strings
func SliceOfString(source []string) []string {
	if source == nil {
		return nil
	}
	res := make([]string, len(source))
	copy(res, source)
	return res
}
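Editor's note: the point of these helpers is aliasing-free copies — mutating the clone must not touch the source. A tiny illustration with made-up values:

package main

import (
	"fmt"

	"github.com/docker/cli/kubernetes/compose/clone"
)

func main() {
	src := map[string][]string{"groups": {"admin"}}
	dst := clone.MapOfStringToSliceOfString(src)
	dst["groups"][0] = "guest"
	fmt.Println(src["groups"][0]) // still "admin": the nested slice was copied, not shared
}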
@ -0,0 +1,26 @@
package impersonation

import "github.com/docker/cli/kubernetes/compose/clone"

// Config contains the data required to impersonate a user.
type Config struct {
	// UserName is the username to impersonate on each request.
	UserName string
	// Groups are the groups to impersonate on each request.
	Groups []string
	// Extra is a free-form field which can be used to link some authentication information
	// to authorization information. This field allows you to impersonate it.
	Extra map[string][]string
}

// Clone clones the impersonation config
func (ic *Config) Clone() *Config {
	if ic == nil {
		return nil
	}
	result := new(Config)
	result.UserName = ic.UserName
	result.Groups = clone.SliceOfString(ic.Groups)
	result.Extra = clone.MapOfStringToSliceOfString(ic.Extra)
	return result
}
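Editor's note: Clone is deliberately nil-safe, so callers can copy an optional config without a guard. A quick sketch with placeholder values:

package main

import (
	"fmt"

	"github.com/docker/cli/kubernetes/compose/impersonation"
)

func main() {
	var cfg *impersonation.Config
	fmt.Println(cfg.Clone() == nil) // true: nil in, nil out

	cfg = &impersonation.Config{UserName: "jane", Groups: []string{"dev"}}
	dup := cfg.Clone()
	dup.Groups[0] = "ops"
	fmt.Println(cfg.Groups[0]) // still "dev": Groups was deep-copied
}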
@ -1,43 +0,0 @@
package compose

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name used to register these objects
const GroupName = "compose.docker.com"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns back a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// SchemeBuilder collects functions that add things to a scheme. It's to allow
	// code to compile without explicitly referencing generated types. You should
	// declare one in each package that will have generated deep copy or conversion
	// functions.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)

	// AddToScheme applies all the stored functions to the scheme. A non-nil error
	// indicates that one function failed and the attempt was abandoned.
	AddToScheme = SchemeBuilder.AddToScheme
)

// adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Stack{},
		&StackList{},
	)
	return nil
}
@ -1,117 +0,0 @@
package compose

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ImpersonationConfig holds information use to impersonate calls from the compose controller
type ImpersonationConfig struct {
	// UserName is the username to impersonate on each request.
	UserName string
	// Groups are the groups to impersonate on each request.
	Groups []string
	// Extra is a free-form field which can be used to link some authentication information
	// to authorization information. This field allows you to impersonate it.
	Extra map[string][]string
}

// Stack defines a stack object to be register in the kubernetes API
// +genclient=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Stack struct {
	metav1.TypeMeta
	metav1.ObjectMeta
	Spec   StackSpec
	Status StackStatus
}

// StackStatus defines the observed state of Stack
type StackStatus struct {
	Phase   StackPhase
	Message string
}

// StackSpec defines the desired state of Stack
type StackSpec struct {
	ComposeFile string
	Owner       ImpersonationConfig
}

// StackPhase defines the status phase in which the stack is.
type StackPhase string

// These are valid conditions of a stack.
const (
	// Available means the stack is available.
	StackAvailable StackPhase = "Available"
	// Progressing means the deployment is progressing.
	StackProgressing StackPhase = "Progressing"
	// StackFailure is added in a stack when one of its members fails to be created
	// or deleted.
	StackFailure StackPhase = "Failure"
)

// StackList defines a list of stacks
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type StackList struct {
	metav1.TypeMeta
	metav1.ListMeta
	Items []Stack
}

// Owner defines the owner of a stack. It is used to impersonate the controller calls
// to kubernetes api.
type Owner struct {
	metav1.TypeMeta
	metav1.ObjectMeta
	Owner ImpersonationConfig
}

// OwnerList defines a list of owner.
type OwnerList struct {
	metav1.TypeMeta
	metav1.ListMeta
	Items []Owner
}

// FIXME(vdemeester) are those necessary ??

// NewStatus is newStatus
func (Stack) NewStatus() interface{} {
	return StackStatus{}
}

// GetStatus returns the status
func (pc *Stack) GetStatus() interface{} {
	return pc.Status
}

// SetStatus sets the status
func (pc *Stack) SetStatus(s interface{}) {
	pc.Status = s.(StackStatus)
}

// GetSpec returns the spec
func (pc *Stack) GetSpec() interface{} {
	return pc.Spec
}

// SetSpec sets the spec
func (pc *Stack) SetSpec(s interface{}) {
	pc.Spec = s.(StackSpec)
}

// GetObjectMeta returns the ObjectMeta
func (pc *Stack) GetObjectMeta() *metav1.ObjectMeta {
	return &pc.ObjectMeta
}

// SetGeneration sets the Generation
func (pc *Stack) SetGeneration(generation int64) {
	pc.ObjectMeta.Generation = generation
}

// GetGeneration returns the Generation
func (pc Stack) GetGeneration() int64 {
	return pc.ObjectMeta.Generation
}
@ -1,11 +1,11 @@
// Package v1beta1 holds the v1beta1 versions of our stack structures.
// API versions allow the api contract for a resource to be changed while keeping
// Api versions allow the api contract for a resource to be changed while keeping
// backward compatibility by support multiple concurrent versions
// of the same resource
//

// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/docker/cli/kubernetes/compose
// +k8s:defaulter-gen=TypeMeta
// +groupName=compose.docker.com

// Package v1beta1 is the first version of the Stack spec, containing only a compose file
package v1beta1 // import "github.com/docker/cli/kubernetes/compose/v1beta1"
@ -0,0 +1,31 @@
package v1beta1

import (
	"github.com/docker/cli/kubernetes/compose/impersonation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Owner defines the owner of a stack. It is used to impersonate the controller calls
// to kubernetes api.
type Owner struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Owner             impersonation.Config `json:"owner,omitempty"`
}

func (o *Owner) clone() *Owner {
	if o == nil {
		return nil
	}
	result := new(Owner)
	result.TypeMeta = o.TypeMeta
	result.ObjectMeta = o.ObjectMeta
	// copy the owner config from the source, not from result's zero value
	result.Owner = *o.Owner.Clone()
	return result
}

// DeepCopyObject clones the owner
func (o *Owner) DeepCopyObject() runtime.Object {
	return o.clone()
}
@ -1,25 +0,0 @@
package v1beta1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/docker/cli/kubernetes/compose"
)

// Owner defines the owner of a stack. It is used to impersonate the controller calls
// to kubernetes api.
// +genclient=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +subresource-request
type Owner struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Owner             compose.ImpersonationConfig `json:"owner,omitempty"`
}

// OwnerList defines a list of owner.
type OwnerList struct {
	metav1.TypeMeta
	metav1.ListMeta
	Items []Owner
}
@ -9,19 +9,12 @@ import (
// GroupName is the group name used to register these objects
const GroupName = "compose.docker.com"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}

// Alias variables for the registration
var (
	// SchemeBuilder collects functions that add things to a scheme. It's to allow
	// code to compile without explicitly referencing generated types. You should
	// declare one in each package that will have generated deep copy or conversion
	// functions.
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
	SchemeBuilder      runtime.SchemeBuilder
	localSchemeBuilder = &SchemeBuilder

	// AddToScheme applies all the stored functions to the scheme. A non-nil error
	// indicates that one function failed and the attempt was abandoned.
	AddToScheme = localSchemeBuilder.AddToScheme
)

@ -35,7 +28,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
		&Stack{},
		&StackList{},
		&Owner{},
		&OwnerList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
@ -6,7 +6,6 @@ import (
)

// StackList defines a list of stacks
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type StackList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

@ -14,13 +13,25 @@ type StackList struct {
	Items []Stack `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +genclient=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DeepCopyObject clones the stack list
func (s *StackList) DeepCopyObject() runtime.Object {
	if s == nil {
		return nil
	}
	result := new(StackList)
	result.TypeMeta = s.TypeMeta
	result.ListMeta = s.ListMeta
	if s.Items == nil {
		return result
	}
	result.Items = make([]Stack, len(s.Items))
	for ix, s := range s.Items {
		result.Items[ix] = *s.clone()
	}
	return result
}

// Stack defines a stack object to be registered in the kubernetes API
// +k8s:openapi-gen=true
// +resource:path=stacks,strategy=StackStrategy
// +subresource:request=Owner,path=owner,rest=OwnerStackREST
type Stack struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

@ -39,9 +50,9 @@ type StackPhase string

// These are valid conditions of a stack.
const (
	// Available means the stack is available.
	// StackAvailable means the stack is available.
	StackAvailable StackPhase = "Available"
	// Progressing means the deployment is progressing.
	// StackProgressing means the deployment is progressing.
	StackProgressing StackPhase = "Progressing"
	// StackFailure is added in a stack when one of its members fails to be created
	// or deleted.

@ -51,18 +62,26 @@ const (
// StackStatus defines the observed state of Stack
type StackStatus struct {
	// Current condition of the stack.
	// +optional
	Phase StackPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=StackPhase"`
	// A human readable message indicating details about the stack.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}

// Clone implements the Cloner interface for kubernetes
func (s *Stack) Clone() (*Stack, error) {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return nil, err
func (s *Stack) clone() *Stack {
	if s == nil {
		return nil
	}
	return s.DeepCopy(), nil
	// in v1beta1, Stack has no pointer, slice or map. Plain old struct copy is ok
	result := *s
	return &result
}

// Clone implements the Cloner interface for kubernetes
func (s *Stack) Clone() *Stack {
	return s.clone()
}

// DeepCopyObject clones the stack
func (s *Stack) DeepCopyObject() runtime.Object {
	return s.clone()
}
@ -0,0 +1 @@
package v1beta1
@ -1,9 +0,0 @@
version: "3.2"
services:
  nginx:
    image: nginx
    deploy:
      replicas: 2

  redis:
    image: redis:alpine
@ -1,10 +0,0 @@
version: "3.2"
services:
  nginx:
    image: nginx
    deploy:
      replicas: 2
  redis:
    image: redis:alpine
    deploy:
      replicas: 5
@ -1,10 +0,0 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      resources:
        limits:
          memory: 64M
        reservations:
          memory: 64M
@ -1,11 +0,0 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      resources:
        limits:
          memory: 64M
        reservations:
          memory: 64M
      replicas: 5
@ -1,6 +0,0 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      replicas: 2
@ -1,6 +0,0 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      replicas: 5
@ -1,4 +0,0 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
@ -1,6 +0,0 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      replicas: 5
@ -1,169 +0,0 @@
// +build !ignore_autogenerated

// This file was autogenerated by conversion-gen. Do not edit it manually!

package v1beta1

import (
	compose "github.com/docker/cli/kubernetes/compose"
	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

func init() {
	localSchemeBuilder.Register(RegisterConversions)
}

// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
	return scheme.AddGeneratedConversionFuncs(
		Convert_v1beta1_Owner_To_compose_Owner,
		Convert_compose_Owner_To_v1beta1_Owner,
		Convert_v1beta1_Stack_To_compose_Stack,
		Convert_compose_Stack_To_v1beta1_Stack,
		Convert_v1beta1_StackList_To_compose_StackList,
		Convert_compose_StackList_To_v1beta1_StackList,
		Convert_v1beta1_StackSpec_To_compose_StackSpec,
		Convert_compose_StackSpec_To_v1beta1_StackSpec,
		Convert_v1beta1_StackStatus_To_compose_StackStatus,
		Convert_compose_StackStatus_To_v1beta1_StackStatus,
	)
}

func autoConvert_v1beta1_Owner_To_compose_Owner(in *Owner, out *compose.Owner, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	out.Owner = in.Owner
	return nil
}

// Convert_v1beta1_Owner_To_compose_Owner is an autogenerated conversion function.
func Convert_v1beta1_Owner_To_compose_Owner(in *Owner, out *compose.Owner, s conversion.Scope) error {
	return autoConvert_v1beta1_Owner_To_compose_Owner(in, out, s)
}

func autoConvert_compose_Owner_To_v1beta1_Owner(in *compose.Owner, out *Owner, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	out.Owner = in.Owner
	return nil
}

// Convert_compose_Owner_To_v1beta1_Owner is an autogenerated conversion function.
func Convert_compose_Owner_To_v1beta1_Owner(in *compose.Owner, out *Owner, s conversion.Scope) error {
	return autoConvert_compose_Owner_To_v1beta1_Owner(in, out, s)
}

func autoConvert_v1beta1_Stack_To_compose_Stack(in *Stack, out *compose.Stack, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1beta1_StackSpec_To_compose_StackSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1beta1_StackStatus_To_compose_StackStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1beta1_Stack_To_compose_Stack is an autogenerated conversion function.
func Convert_v1beta1_Stack_To_compose_Stack(in *Stack, out *compose.Stack, s conversion.Scope) error {
	return autoConvert_v1beta1_Stack_To_compose_Stack(in, out, s)
}

func autoConvert_compose_Stack_To_v1beta1_Stack(in *compose.Stack, out *Stack, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_compose_StackSpec_To_v1beta1_StackSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_compose_StackStatus_To_v1beta1_StackStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_compose_Stack_To_v1beta1_Stack is an autogenerated conversion function.
func Convert_compose_Stack_To_v1beta1_Stack(in *compose.Stack, out *Stack, s conversion.Scope) error {
	return autoConvert_compose_Stack_To_v1beta1_Stack(in, out, s)
}

func autoConvert_v1beta1_StackList_To_compose_StackList(in *StackList, out *compose.StackList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]compose.Stack, len(*in))
		for i := range *in {
			if err := Convert_v1beta1_Stack_To_compose_Stack(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// Convert_v1beta1_StackList_To_compose_StackList is an autogenerated conversion function.
func Convert_v1beta1_StackList_To_compose_StackList(in *StackList, out *compose.StackList, s conversion.Scope) error {
	return autoConvert_v1beta1_StackList_To_compose_StackList(in, out, s)
}

func autoConvert_compose_StackList_To_v1beta1_StackList(in *compose.StackList, out *StackList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Stack, len(*in))
		for i := range *in {
			if err := Convert_compose_Stack_To_v1beta1_Stack(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = make([]Stack, 0)
	}
	return nil
}

// Convert_compose_StackList_To_v1beta1_StackList is an autogenerated conversion function.
func Convert_compose_StackList_To_v1beta1_StackList(in *compose.StackList, out *StackList, s conversion.Scope) error {
	return autoConvert_compose_StackList_To_v1beta1_StackList(in, out, s)
}

func autoConvert_v1beta1_StackSpec_To_compose_StackSpec(in *StackSpec, out *compose.StackSpec, s conversion.Scope) error {
	out.ComposeFile = in.ComposeFile
	return nil
}

func Convert_compose_StackSpec_To_v1beta1_StackSpec(in *compose.StackSpec, out *StackSpec, s conversion.Scope) error {
	return autoConvert_compose_StackSpec_To_v1beta1_StackSpec(in, out, s)
}

// Convert_v1beta1_StackSpec_To_compose_StackSpec is an autogenerated conversion function.
func Convert_v1beta1_StackSpec_To_compose_StackSpec(in *StackSpec, out *compose.StackSpec, s conversion.Scope) error {
	return autoConvert_v1beta1_StackSpec_To_compose_StackSpec(in, out, s)
}

func autoConvert_compose_StackSpec_To_v1beta1_StackSpec(in *compose.StackSpec, out *StackSpec, s conversion.Scope) error {
	out.ComposeFile = in.ComposeFile
	return nil
}

func autoConvert_v1beta1_StackStatus_To_compose_StackStatus(in *StackStatus, out *compose.StackStatus, s conversion.Scope) error {
	out.Phase = compose.StackPhase(in.Phase)
	out.Message = in.Message
	return nil
}

// Convert_v1beta1_StackStatus_To_compose_StackStatus is an autogenerated conversion function.
func Convert_v1beta1_StackStatus_To_compose_StackStatus(in *StackStatus, out *compose.StackStatus, s conversion.Scope) error {
	return autoConvert_v1beta1_StackStatus_To_compose_StackStatus(in, out, s)
}

func autoConvert_compose_StackStatus_To_v1beta1_StackStatus(in *compose.StackStatus, out *StackStatus, s conversion.Scope) error {
	out.Phase = StackPhase(in.Phase)
	out.Message = in.Message
	return nil
}

// Convert_compose_StackStatus_To_v1beta1_StackStatus is an autogenerated conversion function.
func Convert_compose_StackStatus_To_v1beta1_StackStatus(in *compose.StackStatus, out *StackStatus, s conversion.Scope) error {
	return autoConvert_compose_StackStatus_To_v1beta1_StackStatus(in, out, s)
}
@ -1,205 +0,0 @@
// +build !ignore_autogenerated

// This file was autogenerated by deepcopy-gen. Do not edit it manually!

package v1beta1

import (
	reflect "reflect"

	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// Deprecated: register deep-copy functions.
func init() {
	SchemeBuilder.Register(RegisterDeepCopies)
}

// Deprecated: RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
	return scheme.AddGeneratedDeepCopyFuncs(
		conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
			in.(*Owner).DeepCopyInto(out.(*Owner))
			return nil
		}, InType: reflect.TypeOf(&Owner{})},
		conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
			in.(*OwnerList).DeepCopyInto(out.(*OwnerList))
			return nil
		}, InType: reflect.TypeOf(&OwnerList{})},
		conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
			in.(*Stack).DeepCopyInto(out.(*Stack))
			return nil
		}, InType: reflect.TypeOf(&Stack{})},
		conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
			in.(*StackList).DeepCopyInto(out.(*StackList))
			return nil
		}, InType: reflect.TypeOf(&StackList{})},
		conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
			in.(*StackSpec).DeepCopyInto(out.(*StackSpec))
			return nil
		}, InType: reflect.TypeOf(&StackSpec{})},
		conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
			in.(*StackStatus).DeepCopyInto(out.(*StackStatus))
			return nil
		}, InType: reflect.TypeOf(&StackStatus{})},
	)
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Owner) DeepCopyInto(out *Owner) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Owner.DeepCopyInto(&out.Owner)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Owner.
func (x *Owner) DeepCopy() *Owner {
	if x == nil {
		return nil
	}
	out := new(Owner)
	x.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (x *Owner) DeepCopyObject() runtime.Object {
	if c := x.DeepCopy(); c != nil {
		return c
	} else {
		return nil
	}
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OwnerList) DeepCopyInto(out *OwnerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Owner, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerList.
func (x *OwnerList) DeepCopy() *OwnerList {
	if x == nil {
		return nil
	}
	out := new(OwnerList)
	x.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (x *OwnerList) DeepCopyObject() runtime.Object {
	if c := x.DeepCopy(); c != nil {
		return c
	} else {
		return nil
	}
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Stack) DeepCopyInto(out *Stack) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	out.Status = in.Status
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stack.
func (x *Stack) DeepCopy() *Stack {
	if x == nil {
		return nil
	}
	out := new(Stack)
	x.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (x *Stack) DeepCopyObject() runtime.Object {
	if c := x.DeepCopy(); c != nil {
		return c
	} else {
		return nil
	}
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StackList) DeepCopyInto(out *StackList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Stack, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackList.
func (x *StackList) DeepCopy() *StackList {
	if x == nil {
		return nil
	}
	out := new(StackList)
	x.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (x *StackList) DeepCopyObject() runtime.Object {
	if c := x.DeepCopy(); c != nil {
		return c
	} else {
		return nil
	}
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StackSpec) DeepCopyInto(out *StackSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSpec.
func (x *StackSpec) DeepCopy() *StackSpec {
	if x == nil {
		return nil
	}
	out := new(StackSpec)
	x.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StackStatus) DeepCopyInto(out *StackStatus) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackStatus.
func (x *StackStatus) DeepCopy() *StackStatus {
	if x == nil {
		return nil
	}
	out := new(StackStatus)
	x.DeepCopyInto(out)
	return out
}
@ -0,0 +1,26 @@
package v1beta2

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// ComposeFile is the content of a stack's compose file if any
type ComposeFile struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	ComposeFile       string `json:"composeFile,omitempty"`
}

func (c *ComposeFile) clone() *ComposeFile {
	if c == nil {
		return nil
	}
	res := *c
	return &res
}

// DeepCopyObject clones the ComposeFile
func (c *ComposeFile) DeepCopyObject() runtime.Object {
	return c.clone()
}
@ -0,0 +1,6 @@
// API versions allow the API contract for a resource to be changed while keeping
// backward compatibility by supporting multiple concurrent versions
// of the same resource

// Package v1beta2 is the second version of the stack, containing a structured spec
package v1beta2 // import "github.com/docker/cli/kubernetes/compose/v1beta2"
@ -0,0 +1,30 @@
package v1beta2

import (
	"github.com/docker/cli/kubernetes/compose/impersonation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Owner describes the user who created the stack
type Owner struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Owner             impersonation.Config `json:"owner,omitempty"`
}

func (o *Owner) clone() *Owner {
	if o == nil {
		return nil
	}
	result := new(Owner)
	result.TypeMeta = o.TypeMeta
	result.ObjectMeta = o.ObjectMeta
	// copy the owner config from the source, not from result's zero value
	result.Owner = *o.Owner.Clone()
	return result
}

// DeepCopyObject clones the owner
func (o *Owner) DeepCopyObject() runtime.Object {
	return o.clone()
}
@ -0,0 +1,42 @@
package v1beta2

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the name of the compose group
const GroupName = "compose.docker.com"

var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
	// SchemeBuilder is the scheme builder
	SchemeBuilder runtime.SchemeBuilder
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme adds to scheme
	AddToScheme = localSchemeBuilder.AddToScheme
)

func init() {
	localSchemeBuilder.Register(addKnownTypes)
}

// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Stack{},
		&StackList{},
		&Owner{},
		&ComposeFile{},
		&Scale{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}

// GroupResource takes an unqualified resource and returns a Group qualified GroupResource
func GroupResource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}
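Editor's note: a sketch of how this registration is consumed — building a runtime.Scheme that recognizes the v1beta2 kinds. The variable names are illustrative:

package main

import (
	"fmt"

	"github.com/docker/cli/kubernetes/compose/v1beta2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := v1beta2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme now knows the registered kinds and can map objects back to them.
	gvks, _, err := scheme.ObjectKinds(&v1beta2.Stack{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // compose.docker.com/v1beta2, Kind=Stack
}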
@ -0,0 +1,29 @@
package v1beta2

import (
	"github.com/docker/cli/kubernetes/compose/clone"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Scale contains the current/desired replica count for services in a stack.
type Scale struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              map[string]int `json:"spec,omitempty"`
	Status            map[string]int `json:"status,omitempty"`
}

func (s *Scale) clone() *Scale {
	// guard against a nil receiver, as the sibling types in this package do
	if s == nil {
		return nil
	}
	return &Scale{
		TypeMeta:   s.TypeMeta,
		ObjectMeta: s.ObjectMeta,
		Spec:       clone.MapOfStringToInt(s.Spec),
		Status:     clone.MapOfStringToInt(s.Status),
	}
}

// DeepCopyObject clones the scale
func (s *Scale) DeepCopyObject() runtime.Object {
	return s.clone()
}
@ -0,0 +1,256 @@
|
|||
package v1beta2
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// StackList is a list of stacks
|
||||
type StackList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
Items []Stack `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// Stack is v1beta2's representation of a Stack
|
||||
type Stack struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec *StackSpec `json:"spec,omitempty"`
|
||||
Status *StackStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// DeepCopyObject clones the stack
|
||||
func (s *Stack) DeepCopyObject() runtime.Object {
|
||||
return s.clone()
|
||||
}
|
||||
|
||||
// DeepCopyObject clones the stack list
|
||||
func (s *StackList) DeepCopyObject() runtime.Object {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
result := new(StackList)
|
||||
result.TypeMeta = s.TypeMeta
|
||||
result.ListMeta = s.ListMeta
|
||||
if s.Items == nil {
|
||||
return result
|
||||
}
|
||||
result.Items = make([]Stack, len(s.Items))
|
||||
for ix, s := range s.Items {
|
||||
result.Items[ix] = *s.clone()
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *Stack) clone() *Stack {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
result := new(Stack)
|
||||
result.TypeMeta = s.TypeMeta
|
||||
result.ObjectMeta = s.ObjectMeta
|
||||
result.Spec = s.Spec.clone()
|
||||
result.Status = s.Status.clone()
|
||||
return result
|
||||
}
|
||||
|
||||
// StackSpec defines the desired state of Stack
|
||||
type StackSpec struct {
|
||||
Services []ServiceConfig `json:"services,omitempty"`
|
||||
Secrets map[string]SecretConfig `json:"secrets,omitempty"`
|
||||
Configs map[string]ConfigObjConfig `json:"configs,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceConfig is the configuration of one service
|
||||
type ServiceConfig struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
CapAdd []string `json:"cap_add,omitempty"`
|
||||
CapDrop []string `json:"cap_drop,omitempty"`
|
||||
Command []string `json:"command,omitempty"`
|
||||
Configs []ServiceConfigObjConfig `json:"configs,omitempty"`
|
||||
Deploy DeployConfig `json:"deploy,omitempty"`
|
||||
Entrypoint []string `json:"entrypoint,omitempty"`
|
||||
Environment map[string]*string `json:"environment,omitempty"`
|
||||
ExtraHosts []string `json:"extra_hosts,omitempty"`
|
||||
Hostname string `json:"hostname,omitempty"`
|
||||
HealthCheck *HealthCheckConfig `json:"health_check,omitempty"`
|
||||
Image string `json:"image,omitempty"`
|
||||
Ipc string `json:"ipc,omitempty"`
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
Pid string `json:"pid,omitempty"`
|
||||
Ports []ServicePortConfig `json:"ports,omitempty"`
|
||||
Privileged bool `json:"privileged,omitempty"`
|
||||
ReadOnly bool `json:"read_only,omitempty"`
|
||||
Secrets []ServiceSecretConfig `json:"secrets,omitempty"`
|
||||
StdinOpen bool `json:"stdin_open,omitempty"`
|
||||
StopGracePeriod *time.Duration `json:"stop_grace_period,omitempty"`
|
||||
Tmpfs []string `json:"tmpfs,omitempty"`
|
||||
Tty bool `json:"tty,omitempty"`
|
||||
User *int64 `json:"user,omitempty"`
|
||||
Volumes []ServiceVolumeConfig `json:"volumes,omitempty"`
|
||||
WorkingDir string `json:"working_dir,omitempty"`
|
||||
}
|
||||
|
||||
// ServicePortConfig is the port configuration for a service
|
||||
type ServicePortConfig struct {
|
||||
Mode string `json:"mode,omitempty"`
|
||||
Target uint32 `json:"target,omitempty"`
|
||||
Published uint32 `json:"published,omitempty"`
|
||||
Protocol string `json:"protocol,omitempty"`
|
||||
}
|
||||
|
||||
// FileObjectConfig is a config type for a file used by a service
|
||||
type FileObjectConfig struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
File string `json:"file,omitempty"`
|
||||
External External `json:"external,omitempty"`
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
}
|
||||
|
||||
// SecretConfig for a secret
|
||||
type SecretConfig FileObjectConfig
|
||||
|
||||
// ConfigObjConfig is the config for the swarm "Config" object
|
||||
type ConfigObjConfig FileObjectConfig
|
||||
|
||||
// External identifies a Volume or Network as a reference to a resource that is
|
||||
// not managed, and should already exist.
|
||||
// External.name is deprecated and replaced by Volume.name
|
||||
type External struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
External bool `json:"external,omitempty"`
|
||||
}
|
||||
|
||||
// FileReferenceConfig for a reference to a swarm file object
|
||||
type FileReferenceConfig struct {
|
||||
Source string `json:"source,omitempty"`
|
||||
Target string `json:"target,omitempty"`
|
||||
UID string `json:"uid,omitempty"`
|
||||
GID string `json:"gid,omitempty"`
|
||||
Mode *uint32 `json:"mode,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceConfigObjConfig is the config obj configuration for a service
|
||||
type ServiceConfigObjConfig FileReferenceConfig
|
||||
|
||||
// ServiceSecretConfig is the secret configuration for a service
|
||||
type ServiceSecretConfig FileReferenceConfig

// DeployConfig is the deployment configuration for a service
type DeployConfig struct {
    Mode          string            `json:"mode,omitempty"`
    Replicas      *uint64           `json:"replicas,omitempty"`
    Labels        map[string]string `json:"labels,omitempty"`
    UpdateConfig  *UpdateConfig     `json:"update_config,omitempty"`
    Resources     Resources         `json:"resources,omitempty"`
    RestartPolicy *RestartPolicy    `json:"restart_policy,omitempty"`
    Placement     Placement         `json:"placement,omitempty"`
}

// UpdateConfig is the service update configuration
type UpdateConfig struct {
    Parallelism *uint64 `json:"parallelism,omitempty"`
}

// Resources defines the resource limits and reservations
type Resources struct {
    Limits       *Resource `json:"limits,omitempty"`
    Reservations *Resource `json:"reservations,omitempty"`
}

// Resource is a resource to be limited or reserved
type Resource struct {
    NanoCPUs    string `json:"cpus,omitempty"`
    MemoryBytes int64  `json:"memory,omitempty"`
}

// RestartPolicy is the service restart policy
type RestartPolicy struct {
    Condition string `json:"condition,omitempty"`
}

// Placement constraints for the service
type Placement struct {
    Constraints *Constraints `json:"constraints,omitempty"`
}

// Constraints lists constraints that can be set on the service
type Constraints struct {
    OperatingSystem *Constraint
    Architecture    *Constraint
    Hostname        *Constraint
    MatchLabels     map[string]Constraint
}

// Constraint defines a constraint and its operator (== or !=)
type Constraint struct {
    Value    string
    Operator string
}

// HealthCheckConfig is the healthcheck configuration for a service
type HealthCheckConfig struct {
    Test     []string       `json:"test,omitempty"`
    Timeout  *time.Duration `json:"timeout,omitempty"`
    Interval *time.Duration `json:"interval,omitempty"`
    Retries  *uint64        `json:"retries,omitempty"`
}

// ServiceVolumeConfig is a reference to a volume used by a service
type ServiceVolumeConfig struct {
    Type     string `json:"type,omitempty"`
    Source   string `json:"source,omitempty"`
    Target   string `json:"target,omitempty"`
    ReadOnly bool   `json:"read_only,omitempty"`
}

func (s *StackSpec) clone() *StackSpec {
    if s == nil {
        return nil
    }
    result := *s
    return &result
}

// StackPhase is the deployment phase of a stack
type StackPhase string

// These are valid conditions of a stack.
const (
    // StackAvailable means the stack is available.
    StackAvailable StackPhase = "Available"
    // StackProgressing means the deployment is progressing.
    StackProgressing StackPhase = "Progressing"
    // StackFailure is added in a stack when one of its members fails to be created
    // or deleted.
    StackFailure StackPhase = "Failure"
)

// StackStatus defines the observed state of Stack
type StackStatus struct {
    // Current condition of the stack.
    // +optional
    Phase StackPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=StackPhase"`
    // A human-readable message indicating details about the stack.
    // +optional
    Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}

func (s *StackStatus) clone() *StackStatus {
    if s == nil {
        return nil
    }
    result := *s
    return &result
}

// Clone clones a Stack
func (s *Stack) Clone() *Stack {
    return s.clone()
}
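To make the shape of these deployment types concrete, here is a minimal in-package sketch of filling in a DeployConfig. The literal values (mode, cpus, restart condition, constraint operator) are illustrative only, not values mandated by the API:

```go
// Illustrative only: a service with three replicas, rolling updates one
// task at a time, a CPU/memory limit, and a Linux-only placement constraint.
replicas := uint64(3)
parallelism := uint64(1)
deploy := DeployConfig{
    Mode:     "replicated",
    Replicas: &replicas,
    UpdateConfig: &UpdateConfig{
        Parallelism: &parallelism,
    },
    Resources: Resources{
        Limits: &Resource{NanoCPUs: "0.5", MemoryBytes: 128 * 1024 * 1024},
    },
    RestartPolicy: &RestartPolicy{Condition: "on-failure"},
    Placement: Placement{
        Constraints: &Constraints{
            OperatingSystem: &Constraint{Value: "linux", Operator: "=="},
        },
    },
}
_ = deploy
```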
@ -1,233 +0,0 @@
// +build !ignore_autogenerated

// This file was autogenerated by deepcopy-gen. Do not edit it manually!

package compose

import (
    reflect "reflect"

    conversion "k8s.io/apimachinery/pkg/conversion"
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// Deprecated: register deep-copy functions.
func init() {
    SchemeBuilder.Register(RegisterDeepCopies)
}

// Deprecated: RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
    return scheme.AddGeneratedDeepCopyFuncs(
        conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*ImpersonationConfig).DeepCopyInto(out.(*ImpersonationConfig))
            return nil
        }, InType: reflect.TypeOf(&ImpersonationConfig{})},
        conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*Owner).DeepCopyInto(out.(*Owner))
            return nil
        }, InType: reflect.TypeOf(&Owner{})},
        conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*OwnerList).DeepCopyInto(out.(*OwnerList))
            return nil
        }, InType: reflect.TypeOf(&OwnerList{})},
        conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*Stack).DeepCopyInto(out.(*Stack))
            return nil
        }, InType: reflect.TypeOf(&Stack{})},
        conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*StackList).DeepCopyInto(out.(*StackList))
            return nil
        }, InType: reflect.TypeOf(&StackList{})},
        conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*StackSpec).DeepCopyInto(out.(*StackSpec))
            return nil
        }, InType: reflect.TypeOf(&StackSpec{})},
        conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*StackStatus).DeepCopyInto(out.(*StackStatus))
            return nil
        }, InType: reflect.TypeOf(&StackStatus{})},
    )
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImpersonationConfig) DeepCopyInto(out *ImpersonationConfig) {
    *out = *in
    if in.Groups != nil {
        in, out := &in.Groups, &out.Groups
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.Extra != nil {
        in, out := &in.Extra, &out.Extra
        *out = make(map[string][]string, len(*in))
        for key, val := range *in {
            if val == nil {
                (*out)[key] = nil
            } else {
                (*out)[key] = make([]string, len(val))
                copy((*out)[key], val)
            }
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImpersonationConfig.
func (x *ImpersonationConfig) DeepCopy() *ImpersonationConfig {
    if x == nil {
        return nil
    }
    out := new(ImpersonationConfig)
    x.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Owner) DeepCopyInto(out *Owner) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Owner.DeepCopyInto(&out.Owner)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Owner.
func (x *Owner) DeepCopy() *Owner {
    if x == nil {
        return nil
    }
    out := new(Owner)
    x.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (x *Owner) DeepCopyObject() runtime.Object {
    if c := x.DeepCopy(); c != nil {
        return c
    } else {
        return nil
    }
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OwnerList) DeepCopyInto(out *OwnerList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]Owner, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerList.
func (x *OwnerList) DeepCopy() *OwnerList {
    if x == nil {
        return nil
    }
    out := new(OwnerList)
    x.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Stack) DeepCopyInto(out *Stack) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    out.Status = in.Status
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stack.
func (x *Stack) DeepCopy() *Stack {
    if x == nil {
        return nil
    }
    out := new(Stack)
    x.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (x *Stack) DeepCopyObject() runtime.Object {
    if c := x.DeepCopy(); c != nil {
        return c
    } else {
        return nil
    }
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StackList) DeepCopyInto(out *StackList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]Stack, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackList.
func (x *StackList) DeepCopy() *StackList {
    if x == nil {
        return nil
    }
    out := new(StackList)
    x.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (x *StackList) DeepCopyObject() runtime.Object {
    if c := x.DeepCopy(); c != nil {
        return c
    } else {
        return nil
    }
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StackSpec) DeepCopyInto(out *StackSpec) {
    *out = *in
    in.Owner.DeepCopyInto(&out.Owner)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackSpec.
func (x *StackSpec) DeepCopy() *StackSpec {
    if x == nil {
        return nil
    }
    out := new(StackSpec)
    x.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StackStatus) DeepCopyInto(out *StackStatus) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackStatus.
func (x *StackStatus) DeepCopy() *StackStatus {
    if x == nil {
        return nil
    }
    out := new(StackStatus)
    x.DeepCopyInto(out)
    return out
}
@ -31,19 +31,6 @@ func ForService(stackName, serviceName string) map[string]string {
    return labels
}

// Merge merges multiple lists of labels.
func Merge(labelsList ...map[string]string) map[string]string {
    merged := map[string]string{}

    for _, labels := range labelsList {
        for k, v := range labels {
            merged[k] = v
        }
    }

    return merged
}

// SelectorForStack gives the labelSelector to use for a given stack.
// Specific service names can be passed to narrow down the selection.
func SelectorForStack(stackName string, serviceNames ...string) string {
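The Merge helper shown in this hunk folds several label maps left to right, so later lists win on conflicting keys. A short sketch of that behavior; the label values here are illustrative:

```go
base := map[string]string{"com.docker.stack.namespace": "mystack"}
extra := map[string]string{"com.docker.stack.namespace": "otherstack", "tier": "web"}

merged := Merge(base, extra)
// merged["com.docker.stack.namespace"] == "otherstack": the last list wins,
// while keys unique to either list are preserved (merged["tier"] == "web").
```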
@ -80,3 +80,4 @@ k8s.io/client-go kubernetes-1.8.2
k8s.io/kubernetes v1.8.2
k8s.io/kube-openapi 61b46af70dfed79c6d24530cd23b41440a7f22a5
vbom.ml/util 928aaa586d7718c70f4090ddf83f2b34c16fdc8d
github.com/hashicorp/golang-lru 0a025b7e63adc15a622f29b0b2c4c3848243bbf6
@ -0,0 +1,212 @@
package lru

import (
    "fmt"
    "sync"

    "github.com/hashicorp/golang-lru/simplelru"
)

const (
    // Default2QRecentRatio is the ratio of the 2Q cache dedicated
    // to recently added entries that have only been accessed once.
    Default2QRecentRatio = 0.25

    // Default2QGhostEntries is the default ratio of ghost
    // entries kept to track entries recently evicted
    Default2QGhostEntries = 0.50
)

// TwoQueueCache is a thread-safe fixed size 2Q cache.
// 2Q is an enhancement over the standard LRU cache
// in that it tracks both frequently and recently used
// entries separately. This avoids a burst in access to new
// entries from evicting frequently used entries. It adds some
// additional tracking overhead to the standard LRU cache, and is
// computationally about 2x the cost, and adds some metadata
// overhead. The ARCCache is similar, but does not require setting any
// parameters.
type TwoQueueCache struct {
    size       int
    recentSize int

    recent      *simplelru.LRU
    frequent    *simplelru.LRU
    recentEvict *simplelru.LRU
    lock        sync.RWMutex
}

// New2Q creates a new TwoQueueCache using the default
// values for the parameters.
func New2Q(size int) (*TwoQueueCache, error) {
    return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
}

// New2QParams creates a new TwoQueueCache using the provided
// parameter values.
func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
    if size <= 0 {
        return nil, fmt.Errorf("invalid size")
    }
    if recentRatio < 0.0 || recentRatio > 1.0 {
        return nil, fmt.Errorf("invalid recent ratio")
    }
    if ghostRatio < 0.0 || ghostRatio > 1.0 {
        return nil, fmt.Errorf("invalid ghost ratio")
    }

    // Determine the sub-sizes
    recentSize := int(float64(size) * recentRatio)
    evictSize := int(float64(size) * ghostRatio)

    // Allocate the LRUs
    recent, err := simplelru.NewLRU(size, nil)
    if err != nil {
        return nil, err
    }
    frequent, err := simplelru.NewLRU(size, nil)
    if err != nil {
        return nil, err
    }
    recentEvict, err := simplelru.NewLRU(evictSize, nil)
    if err != nil {
        return nil, err
    }

    // Initialize the cache
    c := &TwoQueueCache{
        size:        size,
        recentSize:  recentSize,
        recent:      recent,
        frequent:    frequent,
        recentEvict: recentEvict,
    }
    return c, nil
}

// Get looks up a key's value from the cache.
func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) {
    c.lock.Lock()
    defer c.lock.Unlock()

    // Check if this is a frequent value
    if val, ok := c.frequent.Get(key); ok {
        return val, ok
    }

    // If the value is contained in recent, then we
    // promote it to frequent
    if val, ok := c.recent.Peek(key); ok {
        c.recent.Remove(key)
        c.frequent.Add(key, val)
        return val, ok
    }

    // No hit
    return nil, false
}

// Add adds a value to the cache.
func (c *TwoQueueCache) Add(key, value interface{}) {
    c.lock.Lock()
    defer c.lock.Unlock()

    // Check if the value is frequently used already,
    // and just update the value
    if c.frequent.Contains(key) {
        c.frequent.Add(key, value)
        return
    }

    // Check if the value is recently used, and promote
    // the value into the frequent list
    if c.recent.Contains(key) {
        c.recent.Remove(key)
        c.frequent.Add(key, value)
        return
    }

    // If the value was recently evicted, add it to the
    // frequently used list
    if c.recentEvict.Contains(key) {
        c.ensureSpace(true)
        c.recentEvict.Remove(key)
        c.frequent.Add(key, value)
        return
    }

    // Add to the recently seen list
    c.ensureSpace(false)
    c.recent.Add(key, value)
    return
}

// ensureSpace is used to ensure we have space in the cache
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
    // If we have space, nothing to do
    recentLen := c.recent.Len()
    freqLen := c.frequent.Len()
    if recentLen+freqLen < c.size {
        return
    }

    // If the recent buffer is larger than
    // the target, evict from there
    if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
        k, _, _ := c.recent.RemoveOldest()
        c.recentEvict.Add(k, nil)
        return
    }

    // Remove from the frequent list otherwise
    c.frequent.RemoveOldest()
}

// Len returns the number of cached entries.
func (c *TwoQueueCache) Len() int {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.recent.Len() + c.frequent.Len()
}

// Keys returns all the cached keys.
func (c *TwoQueueCache) Keys() []interface{} {
    c.lock.RLock()
    defer c.lock.RUnlock()
    k1 := c.frequent.Keys()
    k2 := c.recent.Keys()
    return append(k1, k2...)
}

// Remove removes the provided key from the cache.
func (c *TwoQueueCache) Remove(key interface{}) {
    c.lock.Lock()
    defer c.lock.Unlock()
    if c.frequent.Remove(key) {
        return
    }
    if c.recent.Remove(key) {
        return
    }
    if c.recentEvict.Remove(key) {
        return
    }
}

// Purge is used to completely clear the cache.
func (c *TwoQueueCache) Purge() {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.recent.Purge()
    c.frequent.Purge()
    c.recentEvict.Purge()
}

// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *TwoQueueCache) Contains(key interface{}) bool {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.frequent.Contains(key) || c.recent.Contains(key)
}

// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) {
    c.lock.RLock()
    defer c.lock.RUnlock()
    if val, ok := c.frequent.Peek(key); ok {
        return val, ok
    }
    return c.recent.Peek(key)
}
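A minimal usage sketch for the 2Q cache above, written as a standalone program against this vendored package; New2Q only errors on invalid sizing, which the sketch checks anyway:

```go
package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    // 128 total entries, split between the recent and frequent queues
    // using the default ratios.
    cache, err := lru.New2Q(128)
    if err != nil {
        panic(err)
    }
    cache.Add("a", 1)
    cache.Add("a", 1) // a second Add promotes "a" to the frequent queue
    if v, ok := cache.Get("a"); ok {
        fmt.Println(v) // 1
    }
}
```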
@ -0,0 +1,362 @@
Mozilla Public License, version 2.0

1. Definitions

1.1. "Contributor"

     means each individual or legal entity that creates, contributes to the
     creation of, or owns Covered Software.

1.2. "Contributor Version"

     means the combination of the Contributions of others (if any) used by a
     Contributor and that particular Contributor's Contribution.

1.3. "Contribution"

     means Covered Software of a particular Contributor.

1.4. "Covered Software"

     means Source Code Form to which the initial Contributor has attached the
     notice in Exhibit A, the Executable Form of such Source Code Form, and
     Modifications of such Source Code Form, in each case including portions
     thereof.

1.5. "Incompatible With Secondary Licenses"
     means

     a. that the initial Contributor has attached the notice described in
        Exhibit B to the Covered Software; or

     b. that the Covered Software was made available under the terms of
        version 1.1 or earlier of the License, but not also under the terms of
        a Secondary License.

1.6. "Executable Form"

     means any form of the work other than Source Code Form.

1.7. "Larger Work"

     means a work that combines Covered Software with other material, in a
     separate file or files, that is not Covered Software.

1.8. "License"

     means this document.

1.9. "Licensable"

     means having the right to grant, to the maximum extent possible, whether
     at the time of the initial grant or subsequently, any and all of the
     rights conveyed by this License.

1.10. "Modifications"

     means any of the following:

     a. any file in Source Code Form that results from an addition to,
        deletion from, or modification of the contents of Covered Software; or

     b. any new file in Source Code Form that contains any Covered Software.

1.11. "Patent Claims" of a Contributor

     means any patent claim(s), including without limitation, method,
     process, and apparatus claims, in any patent Licensable by such
     Contributor that would be infringed, but for the grant of the License,
     by the making, using, selling, offering for sale, having made, import,
     or transfer of either its Contributions or its Contributor Version.

1.12. "Secondary License"

     means either the GNU General Public License, Version 2.0, the GNU Lesser
     General Public License, Version 2.1, the GNU Affero General Public
     License, Version 3.0, or any later versions of those licenses.

1.13. "Source Code Form"

     means the form of the work preferred for making modifications.

1.14. "You" (or "Your")

     means an individual or a legal entity exercising rights under this
     License. For legal entities, "You" includes any entity that controls, is
     controlled by, or is under common control with You. For purposes of this
     definition, "control" means (a) the power, direct or indirect, to cause
     the direction or management of such entity, whether by contract or
     otherwise, or (b) ownership of more than fifty percent (50%) of the
     outstanding shares or beneficial ownership of such entity.


2. License Grants and Conditions

2.1. Grants

     Each Contributor hereby grants You a world-wide, royalty-free,
     non-exclusive license:

     a. under intellectual property rights (other than patent or trademark)
        Licensable by such Contributor to use, reproduce, make available,
        modify, display, perform, distribute, and otherwise exploit its
        Contributions, either on an unmodified basis, with Modifications, or
        as part of a Larger Work; and

     b. under Patent Claims of such Contributor to make, use, sell, offer for
        sale, have made, import, and otherwise transfer either its
        Contributions or its Contributor Version.

2.2. Effective Date

     The licenses granted in Section 2.1 with respect to any Contribution
     become effective for each Contribution on the date the Contributor first
     distributes such Contribution.

2.3. Limitations on Grant Scope

     The licenses granted in this Section 2 are the only rights granted under
     this License. No additional rights or licenses will be implied from the
     distribution or licensing of Covered Software under this License.
     Notwithstanding Section 2.1(b) above, no patent license is granted by a
     Contributor:

     a. for any code that a Contributor has removed from Covered Software; or

     b. for infringements caused by: (i) Your and any other third party's
        modifications of Covered Software, or (ii) the combination of its
        Contributions with other software (except as part of its Contributor
        Version); or

     c. under Patent Claims infringed by Covered Software in the absence of
        its Contributions.

     This License does not grant any rights in the trademarks, service marks,
     or logos of any Contributor (except as may be necessary to comply with
     the notice requirements in Section 3.4).

2.4. Subsequent Licenses

     No Contributor makes additional grants as a result of Your choice to
     distribute the Covered Software under a subsequent version of this
     License (see Section 10.2) or under the terms of a Secondary License (if
     permitted under the terms of Section 3.3).

2.5. Representation

     Each Contributor represents that the Contributor believes its
     Contributions are its original creation(s) or it has sufficient rights to
     grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

     This License is not intended to limit any rights You have under
     applicable copyright doctrines of fair use, fair dealing, or other
     equivalents.

2.7. Conditions

     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
     Section 2.1.


3. Responsibilities

3.1. Distribution of Source Form

     All distribution of Covered Software in Source Code Form, including any
     Modifications that You create or to which You contribute, must be under
     the terms of this License. You must inform recipients that the Source
     Code Form of the Covered Software is governed by the terms of this
     License, and how they can obtain a copy of this License. You may not
     attempt to alter or restrict the recipients' rights in the Source Code
     Form.

3.2. Distribution of Executable Form

     If You distribute Covered Software in Executable Form then:

     a. such Covered Software must also be made available in Source Code Form,
        as described in Section 3.1, and You must inform recipients of the
        Executable Form how they can obtain a copy of such Source Code Form by
        reasonable means in a timely manner, at a charge no more than the cost
        of distribution to the recipient; and

     b. You may distribute such Executable Form under the terms of this
        License, or sublicense it under different terms, provided that the
        license for the Executable Form does not attempt to limit or alter the
        recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

     You may create and distribute a Larger Work under terms of Your choice,
     provided that You also comply with the requirements of this License for
     the Covered Software. If the Larger Work is a combination of Covered
     Software with a work governed by one or more Secondary Licenses, and the
     Covered Software is not Incompatible With Secondary Licenses, this
     License permits You to additionally distribute such Covered Software
     under the terms of such Secondary License(s), so that the recipient of
     the Larger Work may, at their option, further distribute the Covered
     Software under the terms of either this License or such Secondary
     License(s).

3.4. Notices

     You may not remove or alter the substance of any license notices
     (including copyright notices, patent notices, disclaimers of warranty, or
     limitations of liability) contained within the Source Code Form of the
     Covered Software, except that You may alter any license notices to the
     extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

     You may choose to offer, and to charge a fee for, warranty, support,
     indemnity or liability obligations to one or more recipients of Covered
     Software. However, You may do so only on Your own behalf, and not on
     behalf of any Contributor. You must make it absolutely clear that any
     such warranty, support, indemnity, or liability obligation is offered by
     You alone, and You hereby agree to indemnify every Contributor for any
     liability incurred by such Contributor as a result of warranty, support,
     indemnity or liability terms You offer. You may include additional
     disclaimers of warranty and limitations of liability specific to any
     jurisdiction.

4. Inability to Comply Due to Statute or Regulation

   If it is impossible for You to comply with any of the terms of this License
   with respect to some or all of the Covered Software due to statute,
   judicial order, or regulation then You must: (a) comply with the terms of
   this License to the maximum extent possible; and (b) describe the
   limitations and the code they affect. Such description must be placed in a
   text file included with all distributions of the Covered Software under
   this License. Except to the extent prohibited by statute or regulation,
   such description must be sufficiently detailed for a recipient of ordinary
   skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
     fail to comply with any of its terms. However, if You become compliant,
     then the rights granted under this License from a particular Contributor
     are reinstated (a) provisionally, unless and until such Contributor
     explicitly and finally terminates Your grants, and (b) on an ongoing
     basis, if such Contributor fails to notify You of the non-compliance by
     some reasonable means prior to 60 days after You have come back into
     compliance. Moreover, Your grants from a particular Contributor are
     reinstated on an ongoing basis if such Contributor notifies You of the
     non-compliance by some reasonable means, this is the first time You have
     received notice of non-compliance with this License from such
     Contributor, and You become compliant prior to 30 days after Your receipt
     of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
     infringement claim (excluding declaratory judgment actions,
     counter-claims, and cross-claims) alleging that a Contributor Version
     directly or indirectly infringes any patent, then the rights granted to
     You by any and all Contributors for the Covered Software under Section
     2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
     license agreements (excluding distributors and resellers) which have been
     validly granted by You or Your distributors under this License prior to
     termination shall survive termination.

6. Disclaimer of Warranty

   Covered Software is provided under this License on an "as is" basis,
   without warranty of any kind, either expressed, implied, or statutory,
   including, without limitation, warranties that the Covered Software is free
   of defects, merchantable, fit for a particular purpose or non-infringing.
   The entire risk as to the quality and performance of the Covered Software
   is with You. Should any Covered Software prove defective in any respect,
   You (not any Contributor) assume the cost of any necessary servicing,
   repair, or correction. This disclaimer of warranty constitutes an essential
   part of this License. No use of any Covered Software is authorized under
   this License except under this disclaimer.

7. Limitation of Liability

   Under no circumstances and under no legal theory, whether tort (including
   negligence), contract, or otherwise, shall any Contributor, or anyone who
   distributes Covered Software as permitted above, be liable to You for any
   direct, indirect, special, incidental, or consequential damages of any
   character including, without limitation, damages for lost profits, loss of
   goodwill, work stoppage, computer failure or malfunction, or any and all
   other commercial damages or losses, even if such party shall have been
   informed of the possibility of such damages. This limitation of liability
   shall not apply to liability for death or personal injury resulting from
   such party's negligence to the extent applicable law prohibits such
   limitation. Some jurisdictions do not allow the exclusion or limitation of
   incidental or consequential damages, so this exclusion and limitation may
   not apply to You.

8. Litigation

   Any litigation relating to this License may be brought only in the courts
   of a jurisdiction where the defendant maintains its principal place of
   business and such litigation shall be governed by laws of that
   jurisdiction, without reference to its conflict-of-law provisions. Nothing
   in this Section shall prevent a party's ability to bring cross-claims or
   counter-claims.

9. Miscellaneous

   This License represents the complete agreement concerning the subject
   matter hereof. If any provision of this License is held to be
   unenforceable, such provision shall be reformed only to the extent
   necessary to make it enforceable. Any law or regulation which provides that
   the language of a contract shall be construed against the drafter shall not
   be used to construe this License against a Contributor.


10. Versions of the License

10.1. New Versions

      Mozilla Foundation is the license steward. Except as provided in Section
      10.3, no one other than the license steward has the right to modify or
      publish new versions of this License. Each version will be given a
      distinguishing version number.

10.2. Effect of New Versions

      You may distribute the Covered Software under the terms of the version
      of the License under which You originally received the Covered Software,
      or under the terms of any subsequent version published by the license
      steward.

10.3. Modified Versions

      If you create software not governed by this License, and you want to
      create a new license for such software, you may create and use a
      modified version of this License if you rename the license and remove
      any references to the name of the license steward (except to note that
      such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
      Licenses If You choose to distribute Source Code Form that is
      Incompatible With Secondary Licenses under the terms of this version of
      the License, the notice described in Exhibit B of this License must be
      attached.

Exhibit A - Source Code Form License Notice

      This Source Code Form is subject to the
      terms of the Mozilla Public License, v.
      2.0. If a copy of the MPL was not
      distributed with this file, You can
      obtain one at
      http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice

      This Source Code Form is "Incompatible
      With Secondary Licenses", as defined by
      the Mozilla Public License, v. 2.0.
@ -0,0 +1,25 @@
golang-lru
==========

This provides the `lru` package which implements a fixed-size
thread-safe LRU cache. It is based on the cache in Groupcache.

Documentation
=============

Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)

Example
=======

Using the LRU is very simple:

```go
l, _ := New(128)
for i := 0; i < 256; i++ {
    l.Add(i, nil)
}
if l.Len() != 128 {
    panic(fmt.Sprintf("bad len: %v", l.Len()))
}
```
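The README stops at the basic constructor, but the package also exposes NewWithEvict (defined in the lru.go hunk below). A sketch in the same style, counting evictions through the callback:

```go
evicted := 0
l, _ := NewWithEvict(128, func(key interface{}, value interface{}) {
    evicted++
})
for i := 0; i < 256; i++ {
    l.Add(i, nil)
}
// The 128 oldest entries were evicted, firing the callback each time.
fmt.Println(evicted) // 128
```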
@ -0,0 +1,257 @@
package lru

import (
    "sync"

    "github.com/hashicorp/golang-lru/simplelru"
)

// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
// ARC is an enhancement over the standard LRU cache in that it tracks both
// frequency and recency of use. This avoids a burst in access to new
// entries from evicting the frequently used older entries. It adds some
// additional tracking overhead to a standard LRU cache, computationally
// it is roughly 2x the cost, and the extra memory overhead is linear
// with the size of the cache. ARC has been patented by IBM, but is
// similar to the TwoQueueCache (2Q) which requires setting parameters.
type ARCCache struct {
    size int // Size is the total capacity of the cache
    p    int // P is the dynamic preference towards T1 or T2

    t1 *simplelru.LRU // T1 is the LRU for recently accessed items
    b1 *simplelru.LRU // B1 is the LRU for evictions from t1

    t2 *simplelru.LRU // T2 is the LRU for frequently accessed items
    b2 *simplelru.LRU // B2 is the LRU for evictions from t2

    lock sync.RWMutex
}

// NewARC creates an ARC of the given size
func NewARC(size int) (*ARCCache, error) {
    // Create the sub LRUs
    b1, err := simplelru.NewLRU(size, nil)
    if err != nil {
        return nil, err
    }
    b2, err := simplelru.NewLRU(size, nil)
    if err != nil {
        return nil, err
    }
    t1, err := simplelru.NewLRU(size, nil)
    if err != nil {
        return nil, err
    }
    t2, err := simplelru.NewLRU(size, nil)
    if err != nil {
        return nil, err
    }

    // Initialize the ARC
    c := &ARCCache{
        size: size,
        p:    0,
        t1:   t1,
        b1:   b1,
        t2:   t2,
        b2:   b2,
    }
    return c, nil
}

// Get looks up a key's value from the cache.
func (c *ARCCache) Get(key interface{}) (interface{}, bool) {
    c.lock.Lock()
    defer c.lock.Unlock()

    // If the value is contained in T1 (recent), then
    // promote it to T2 (frequent)
    if val, ok := c.t1.Peek(key); ok {
        c.t1.Remove(key)
        c.t2.Add(key, val)
        return val, ok
    }

    // Check if the value is contained in T2 (frequent)
    if val, ok := c.t2.Get(key); ok {
        return val, ok
    }

    // No hit
    return nil, false
}

// Add adds a value to the cache.
func (c *ARCCache) Add(key, value interface{}) {
    c.lock.Lock()
    defer c.lock.Unlock()

    // Check if the value is contained in T1 (recent), and potentially
    // promote it to frequent T2
    if c.t1.Contains(key) {
        c.t1.Remove(key)
        c.t2.Add(key, value)
        return
    }

    // Check if the value is already in T2 (frequent) and update it
    if c.t2.Contains(key) {
        c.t2.Add(key, value)
        return
    }

    // Check if this value was recently evicted as part of the
    // recently used list
    if c.b1.Contains(key) {
        // T1 set is too small, increase P appropriately
        delta := 1
        b1Len := c.b1.Len()
        b2Len := c.b2.Len()
        if b2Len > b1Len {
            delta = b2Len / b1Len
        }
        if c.p+delta >= c.size {
            c.p = c.size
        } else {
            c.p += delta
        }

        // Potentially need to make room in the cache
        if c.t1.Len()+c.t2.Len() >= c.size {
            c.replace(false)
        }

        // Remove from B1
        c.b1.Remove(key)

        // Add the key to the frequently used list
        c.t2.Add(key, value)
        return
    }

    // Check if this value was recently evicted as part of the
    // frequently used list
    if c.b2.Contains(key) {
        // T2 set is too small, decrease P appropriately
        delta := 1
        b1Len := c.b1.Len()
        b2Len := c.b2.Len()
        if b1Len > b2Len {
            delta = b1Len / b2Len
        }
        if delta >= c.p {
            c.p = 0
        } else {
            c.p -= delta
        }

        // Potentially need to make room in the cache
        if c.t1.Len()+c.t2.Len() >= c.size {
            c.replace(true)
        }

        // Remove from B2
        c.b2.Remove(key)

        // Add the key to the frequently used list
        c.t2.Add(key, value)
        return
    }

    // Potentially need to make room in the cache
    if c.t1.Len()+c.t2.Len() >= c.size {
        c.replace(false)
    }

    // Keep the size of the ghost buffers trim
    if c.b1.Len() > c.size-c.p {
        c.b1.RemoveOldest()
    }
    if c.b2.Len() > c.p {
        c.b2.RemoveOldest()
    }

    // Add to the recently seen list
    c.t1.Add(key, value)
    return
}

// replace is used to adaptively evict from either T1 or T2
// based on the current learned value of P
func (c *ARCCache) replace(b2ContainsKey bool) {
    t1Len := c.t1.Len()
    if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
        k, _, ok := c.t1.RemoveOldest()
        if ok {
            c.b1.Add(k, nil)
        }
    } else {
        k, _, ok := c.t2.RemoveOldest()
        if ok {
            c.b2.Add(k, nil)
        }
    }
}

// Len returns the number of cached entries
func (c *ARCCache) Len() int {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.t1.Len() + c.t2.Len()
}

// Keys returns all the cached keys
func (c *ARCCache) Keys() []interface{} {
    c.lock.RLock()
    defer c.lock.RUnlock()
    k1 := c.t1.Keys()
    k2 := c.t2.Keys()
    return append(k1, k2...)
}

// Remove is used to purge a key from the cache
func (c *ARCCache) Remove(key interface{}) {
    c.lock.Lock()
    defer c.lock.Unlock()
    if c.t1.Remove(key) {
        return
    }
    if c.t2.Remove(key) {
        return
    }
    if c.b1.Remove(key) {
        return
    }
    if c.b2.Remove(key) {
        return
    }
}

// Purge is used to clear the cache
func (c *ARCCache) Purge() {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.t1.Purge()
    c.t2.Purge()
    c.b1.Purge()
    c.b2.Purge()
}

// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *ARCCache) Contains(key interface{}) bool {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.t1.Contains(key) || c.t2.Contains(key)
}

// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *ARCCache) Peek(key interface{}) (interface{}, bool) {
    c.lock.RLock()
    defer c.lock.RUnlock()
    if val, ok := c.t1.Peek(key); ok {
        return val, ok
    }
    return c.t2.Peek(key)
}
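For symmetry with the 2Q example above, a minimal sketch of the ARC cache as a standalone program; the key and value are illustrative:

```go
package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    cache, err := lru.NewARC(128)
    if err != nil {
        panic(err)
    }
    cache.Add("config", "v1") // lands in T1 (seen once)
    // A Get on a key resident in T1 promotes it to T2 (frequent).
    if v, ok := cache.Get("config"); ok {
        fmt.Println(v) // v1
    }
    fmt.Println(cache.Len()) // 1
}
```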
@ -0,0 +1,114 @@
// This package provides a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
package lru

import (
    "sync"

    "github.com/hashicorp/golang-lru/simplelru"
)

// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
    lru  *simplelru.LRU
    lock sync.RWMutex
}

// New creates an LRU of the given size
func New(size int) (*Cache, error) {
    return NewWithEvict(size, nil)
}

// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
    lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
    if err != nil {
        return nil, err
    }
    c := &Cache{
        lru: lru,
    }
    return c, nil
}

// Purge is used to completely clear the cache
func (c *Cache) Purge() {
    c.lock.Lock()
    c.lru.Purge()
    c.lock.Unlock()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) Add(key, value interface{}) bool {
    c.lock.Lock()
    defer c.lock.Unlock()
    return c.lru.Add(key, value)
}

// Get looks up a key's value from the cache.
func (c *Cache) Get(key interface{}) (interface{}, bool) {
    c.lock.Lock()
    defer c.lock.Unlock()
    return c.lru.Get(key)
}

// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *Cache) Contains(key interface{}) bool {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.lru.Contains(key)
}

// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache) Peek(key interface{}) (interface{}, bool) {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.lru.Peek(key)
}

// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) {
    c.lock.Lock()
    defer c.lock.Unlock()

    if c.lru.Contains(key) {
        return true, false
    }
    evict = c.lru.Add(key, value)
    return false, evict
}

// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) {
    c.lock.Lock()
    c.lru.Remove(key)
    c.lock.Unlock()
}

// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() {
    c.lock.Lock()
    c.lru.RemoveOldest()
    c.lock.Unlock()
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache) Keys() []interface{} {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.lru.Keys()
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.lru.Len()
}
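ContainsOrAdd above is the check-then-insert path: when the key is already present, the existing value is kept rather than replaced. A small sketch of its return values:

```go
package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    c, _ := lru.New(2)
    found, evicted := c.ContainsOrAdd("k", 1) // inserted
    fmt.Println(found, evicted)               // false false
    found, evicted = c.ContainsOrAdd("k", 2)  // already present; value 1 is kept
    fmt.Println(found, evicted)               // true false
}
```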
@ -0,0 +1,160 @@
package simplelru

import (
    "container/list"
    "errors"
)

// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})

// LRU implements a non-thread safe fixed size LRU cache
type LRU struct {
    size      int
    evictList *list.List
    items     map[interface{}]*list.Element
    onEvict   EvictCallback
}

// entry is used to hold a value in the evictList
type entry struct {
    key   interface{}
    value interface{}
}

// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
    if size <= 0 {
        return nil, errors.New("Must provide a positive size")
    }
    c := &LRU{
        size:      size,
        evictList: list.New(),
        items:     make(map[interface{}]*list.Element),
        onEvict:   onEvict,
    }
    return c, nil
}

// Purge is used to completely clear the cache
func (c *LRU) Purge() {
    for k, v := range c.items {
        if c.onEvict != nil {
            c.onEvict(k, v.Value.(*entry).value)
        }
        delete(c.items, k)
    }
    c.evictList.Init()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) bool {
    // Check for existing item
    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        ent.Value.(*entry).value = value
        return false
    }

    // Add new item
    ent := &entry{key, value}
    entry := c.evictList.PushFront(ent)
    c.items[key] = entry

    evict := c.evictList.Len() > c.size
    // Verify size not exceeded
    if evict {
        c.removeOldest()
    }
    return evict
}

// Get looks up a key's value from the cache.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        return ent.Value.(*entry).value, true
    }
    return
}

// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
    _, ok = c.items[key]
    return ok
}

// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
    if ent, ok := c.items[key]; ok {
        return ent.Value.(*entry).value, true
    }
    return nil, ok
}

// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU) Remove(key interface{}) bool {
    if ent, ok := c.items[key]; ok {
        c.removeElement(ent)
        return true
    }
    return false
}

// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) {
    ent := c.evictList.Back()
    if ent != nil {
        c.removeElement(ent)
        kv := ent.Value.(*entry)
        return kv.key, kv.value, true
    }
    return nil, nil, false
}

// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (interface{}, interface{}, bool) {
    ent := c.evictList.Back()
    if ent != nil {
        kv := ent.Value.(*entry)
        return kv.key, kv.value, true
    }
    return nil, nil, false
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
    keys := make([]interface{}, len(c.items))
    i := 0
    for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
        keys[i] = ent.Value.(*entry).key
        i++
    }
    return keys
}

// Len returns the number of items in the cache.
func (c *LRU) Len() int {
    return c.evictList.Len()
}

// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
    ent := c.evictList.Back()
    if ent != nil {
        c.removeElement(ent)
    }
}

// removeElement is used to remove a given list element from the cache
func (c *LRU) removeElement(e *list.Element) {
    c.evictList.Remove(e)
    kv := e.Value.(*entry)
    delete(c.items, kv.key)
    if c.onEvict != nil {
        c.onEvict(kv.key, kv.value)
    }
}
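A sketch driving the non-thread-safe core directly, showing the eviction callback firing once capacity is exceeded:

```go
package main

import (
    "fmt"

    "github.com/hashicorp/golang-lru/simplelru"
)

func main() {
    var evicted []interface{}
    onEvict := func(key interface{}, value interface{}) {
        evicted = append(evicted, key)
    }
    l, _ := simplelru.NewLRU(2, onEvict)
    l.Add("a", 1)
    l.Add("b", 2)
    l.Add("c", 3) // capacity is 2, so the oldest entry "a" is evicted
    fmt.Println(evicted) // [a]
}
```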
@ -0,0 +1,83 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "sync"
)

const (
    shardsCount int = 32
)

// Cache is a sharded object store indexed by uint64 keys.
type Cache []*cacheShard

// NewCache creates a Cache holding at most maxSize items, spread over a
// fixed number of shards.
func NewCache(maxSize int) Cache {
    if maxSize < shardsCount {
        maxSize = shardsCount
    }
    cache := make(Cache, shardsCount)
    for i := 0; i < shardsCount; i++ {
        cache[i] = &cacheShard{
            items:   make(map[uint64]interface{}),
            maxSize: maxSize / shardsCount,
        }
    }
    return cache
}

func (c Cache) getShard(index uint64) *cacheShard {
    return c[index%uint64(shardsCount)]
}

// Add stores obj under index. Returns true if the object already existed,
// false otherwise.
func (c *Cache) Add(index uint64, obj interface{}) bool {
    return c.getShard(index).add(index, obj)
}

// Get returns the object stored under index, if any.
func (c *Cache) Get(index uint64) (obj interface{}, found bool) {
    return c.getShard(index).get(index)
}

type cacheShard struct {
    items map[uint64]interface{}
    sync.RWMutex
    maxSize int
}

// add returns true if the object already existed, false otherwise. When the
// shard is full, a random entry is dropped to make room.
func (s *cacheShard) add(index uint64, obj interface{}) bool {
    s.Lock()
    defer s.Unlock()
    _, isOverwrite := s.items[index]
    if !isOverwrite && len(s.items) >= s.maxSize {
        var randomKey uint64
        for randomKey = range s.items {
            break
        }
        delete(s.items, randomKey)
    }
    s.items[index] = obj
    return isOverwrite
}

func (s *cacheShard) get(index uint64) (obj interface{}, found bool) {
    s.RLock()
    defer s.RUnlock()
    obj, found = s.items[index]
    return
}
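The sharded cache above routes a uint64 index to one of 32 shards so that readers and writers on different shards never contend. A minimal sketch; the import path assumes the vendored apimachinery location this diff appears to add, and the index 42 stands in for what would normally be a hash of the object key:

```go
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/cache" // assumed vendored path
)

func main() {
    c := cache.NewCache(1024) // 32 shards, 32 entries each
    fmt.Println(c.Add(42, "value")) // false: first insert under this index
    if obj, found := c.Get(42); found {
        fmt.Println(obj) // value
    }
}
```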
@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "sync"
    "time"

    "github.com/hashicorp/golang-lru"
)

// Clock defines an interface for obtaining the current time
type Clock interface {
    Now() time.Time
}

// realClock implements the Clock interface by calling time.Now()
type realClock struct{}

func (realClock) Now() time.Time { return time.Now() }

// LRUExpireCache is a cache that ensures the most recently accessed keys are returned with
// a ttl beyond which keys are forcibly expired.
type LRUExpireCache struct {
    // clock is used to obtain the current time
    clock Clock

    cache *lru.Cache
    lock  sync.Mutex
}

// NewLRUExpireCache creates an expiring cache with the given size
func NewLRUExpireCache(maxSize int) *LRUExpireCache {
    return NewLRUExpireCacheWithClock(maxSize, realClock{})
}

// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time.
func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache {
    cache, err := lru.New(maxSize)
    if err != nil {
        // if called with an invalid size
        panic(err)
    }
    return &LRUExpireCache{clock: clock, cache: cache}
}

type cacheEntry struct {
    value      interface{}
    expireTime time.Time
}

// Add adds the value to the cache at key with the specified maximum duration.
func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)})
}

// Get returns the value at the specified key from the cache if it exists and is not
// expired, or returns false.
func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) {
    c.lock.Lock()
    defer c.lock.Unlock()
    e, ok := c.cache.Get(key)
    if !ok {
        return nil, false
    }
    if c.clock.Now().After(e.(*cacheEntry).expireTime) {
        c.cache.Remove(key)
        return nil, false
    }
    return e.(*cacheEntry).value, true
}

// Remove removes the specified key from the cache if it exists
func (c *LRUExpireCache) Remove(key interface{}) {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.cache.Remove(key)
}

// Keys returns all the keys in the cache, even if they are expired. Subsequent calls to
// Get may return not found. It returns all keys from oldest to newest.
func (c *LRUExpireCache) Keys() []interface{} {
    c.lock.Lock()
    defer c.lock.Unlock()
    return c.cache.Keys()
}
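A sketch of the expiring cache above: entries stay LRU-bounded and are additionally dropped on read once their TTL passes. The import path is the same assumed vendored location as in the previous example:

```go
package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/cache" // assumed vendored path
)

func main() {
    c := cache.NewLRUExpireCache(128)
    c.Add("session", "token", 30*time.Second)
    if v, ok := c.Get("session"); ok {
        fmt.Println(v) // token: still within the 30s TTL
    }
    // Once the TTL elapses, Get removes the entry and reports a miss.
}
```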
@ -0,0 +1,394 @@
|
|||
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "sync"
    "time"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/clock"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
)

// Config contains all the settings for a Controller.
type Config struct {
    // The queue for your objects; either a FIFO or
    // a DeltaFIFO. Your Process() function should accept
    // the output of this Queue's Pop() method.
    Queue

    // Something that can list and watch your objects.
    ListerWatcher

    // Something that can process your objects.
    Process ProcessFunc

    // The type of your objects.
    ObjectType runtime.Object

    // Reprocess everything at least this often.
    // Note that if it takes longer for you to clear the queue than this
    // period, you will end up processing items in the order determined
    // by FIFO.Replace(). Currently, this is random. If this is a
    // problem, we can change that replacement policy to append new
    // things to the end of the queue instead of replacing the entire
    // queue.
    FullResyncPeriod time.Duration

    // ShouldResync, if specified, is invoked when the controller's reflector determines the next
    // periodic sync should occur. If this returns true, it means the reflector should proceed with
    // the resync.
    ShouldResync ShouldResyncFunc

    // If true, when Process() returns an error, re-enqueue the object.
    // TODO: add interface to let you inject a delay/backoff or drop
    //       the object completely if desired. Pass the object in
    //       question to this interface as a parameter.
    RetryOnError bool
}

// ShouldResyncFunc is a type of function that indicates if a reflector should perform a
// resync or not. It can be used by a shared informer to support multiple event handlers with custom
// resync periods.
type ShouldResyncFunc func() bool

// ProcessFunc processes a single object.
type ProcessFunc func(obj interface{}) error

// controller is the private implementation of Controller.
type controller struct {
    config         Config
    reflector      *Reflector
    reflectorMutex sync.RWMutex
    clock          clock.Clock
}

// Controller is a generic controller framework.
type Controller interface {
    Run(stopCh <-chan struct{})
    HasSynced() bool
    LastSyncResourceVersion() string
}

// New makes a new Controller from the given Config.
func New(c *Config) Controller {
    ctlr := &controller{
        config: *c,
        clock:  &clock.RealClock{},
    }
    return ctlr
}

// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *controller) Run(stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    go func() {
        <-stopCh
        c.config.Queue.Close()
    }()
    r := NewReflector(
        c.config.ListerWatcher,
        c.config.ObjectType,
        c.config.Queue,
        c.config.FullResyncPeriod,
    )
    r.ShouldResync = c.config.ShouldResync
    r.clock = c.clock

    c.reflectorMutex.Lock()
    c.reflector = r
    c.reflectorMutex.Unlock()

    var wg wait.Group
    defer wg.Wait()

    wg.StartWithChannel(stopCh, r.Run)

    wait.Until(c.processLoop, time.Second, stopCh)
}

// HasSynced returns true once this controller has completed an initial resource listing.
func (c *controller) HasSynced() bool {
    return c.config.Queue.HasSynced()
}

// LastSyncResourceVersion reports the resource version observed by the reflector,
// or "" if the reflector has not started yet.
func (c *controller) LastSyncResourceVersion() string {
    if c.reflector == nil {
        return ""
    }
    return c.reflector.LastSyncResourceVersion()
}

// processLoop drains the work queue.
// TODO: Consider doing the processing in parallel. This will require a little thought
// to make sure that we don't end up processing the same object multiple times
// concurrently.
//
// TODO: Plumb through the stopCh here (and down to the queue) so that this can
// actually exit when the controller is stopped. Or just give up on this stuff
// ever being stoppable. Converting this whole package to use Context would
// also be helpful.
func (c *controller) processLoop() {
    for {
        obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
        if err != nil {
            if err == FIFOClosedError {
                return
            }
            if c.config.RetryOnError {
                // This is the safe way to re-enqueue.
                c.config.Queue.AddIfNotPresent(obj)
            }
        }
    }
}

// ResourceEventHandler can handle notifications for events that happen to a
// resource. The events are informational only, so you can't return an
// error.
//  * OnAdd is called when an object is added.
//  * OnUpdate is called when an object is modified. Note that oldObj is the
//    last known state of the object-- it is possible that several changes
//    were combined together, so you can't use this to see every single
//    change. OnUpdate is also called when a re-list happens, and it will
//    get called even if nothing changed. This is useful for periodically
//    evaluating or syncing something.
//  * OnDelete will get the final state of the item if it is known, otherwise
//    it will get an object of type DeletedFinalStateUnknown. This can
//    happen if the watch is closed and misses the delete event and we don't
//    notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {
    OnAdd(obj interface{})
    OnUpdate(oldObj, newObj interface{})
    OnDelete(obj interface{})
}

// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
// as few of the notification functions as you want while still implementing
// ResourceEventHandler.
type ResourceEventHandlerFuncs struct {
    AddFunc    func(obj interface{})
    UpdateFunc func(oldObj, newObj interface{})
    DeleteFunc func(obj interface{})
}

// OnAdd calls AddFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
    if r.AddFunc != nil {
        r.AddFunc(obj)
    }
}

// OnUpdate calls UpdateFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
    if r.UpdateFunc != nil {
        r.UpdateFunc(oldObj, newObj)
    }
}

// OnDelete calls DeleteFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
    if r.DeleteFunc != nil {
        r.DeleteFunc(obj)
    }
}

// FilteringResourceEventHandler applies the provided filter to all events coming
// in, ensuring the appropriate nested handler method is invoked. An object
// that starts passing the filter after an update is considered an add, and an
// object that stops passing the filter after an update is considered a delete.
type FilteringResourceEventHandler struct {
    FilterFunc func(obj interface{}) bool
    Handler    ResourceEventHandler
}

// OnAdd calls the nested handler only if the filter succeeds.
func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
    if !r.FilterFunc(obj) {
        return
    }
    r.Handler.OnAdd(obj)
}

// OnUpdate ensures the proper handler is called depending on whether the filter matches.
func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
    newer := r.FilterFunc(newObj)
    older := r.FilterFunc(oldObj)
    switch {
    case newer && older:
        r.Handler.OnUpdate(oldObj, newObj)
    case newer && !older:
        r.Handler.OnAdd(newObj)
    case !newer && older:
        r.Handler.OnDelete(oldObj)
    default:
        // do nothing
    }
}

// OnDelete calls the nested handler only if the filter succeeds.
func (r FilteringResourceEventHandler) OnDelete(obj interface{}) {
    if !r.FilterFunc(obj) {
        return
    }
    r.Handler.OnDelete(obj)
}
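For illustration, a sketch of wiring the filtering adaptor to the funcs adaptor above (not part of this commit; the namespace-prefix predicate and the fmt/strings imports are assumptions, and DeletionHandlingMetaNamespaceKeyFunc is defined just below):

    handler := FilteringResourceEventHandler{
        FilterFunc: func(obj interface{}) bool {
            // Keep only objects keyed under the hypothetical "kube-system" namespace.
            key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
            return err == nil && strings.HasPrefix(key, "kube-system/")
        },
        Handler: ResourceEventHandlerFuncs{
            AddFunc:    func(obj interface{}) { fmt.Println("add passed filter") },
            DeleteFunc: func(obj interface{}) { fmt.Println("delete passed filter") },
        },
    }
    // Per the OnUpdate switch above, an update whose new state stops matching
    // the filter is delivered to DeleteFunc rather than UpdateFunc.
    _ = handler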

// DeletionHandlingMetaNamespaceKeyFunc checks for
// DeletedFinalStateUnknown objects before calling
// MetaNamespaceKeyFunc.
func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
    if d, ok := obj.(DeletedFinalStateUnknown); ok {
        return d.Key, nil
    }
    return MetaNamespaceKeyFunc(obj)
}

// NewInformer returns a Store and a controller for populating the store
// while also providing event notifications. You should only use the returned
// Store for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
//  * lw is list and watch functions for the source of the resource you want to
//    be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//
func NewInformer(
    lw ListerWatcher,
    objType runtime.Object,
    resyncPeriod time.Duration,
    h ResourceEventHandler,
) (Store, Controller) {
    // This will hold the client state, as we know it.
    clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)

    // This will hold incoming changes. Note how we pass clientState in as a
    // KeyLister, that way resync operations will result in the correct set
    // of update/delete deltas.
    fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)

    cfg := &Config{
        Queue:            fifo,
        ListerWatcher:    lw,
        ObjectType:       objType,
        FullResyncPeriod: resyncPeriod,
        RetryOnError:     false,

        Process: func(obj interface{}) error {
            // from oldest to newest
            for _, d := range obj.(Deltas) {
                switch d.Type {
                case Sync, Added, Updated:
                    if old, exists, err := clientState.Get(d.Object); err == nil && exists {
                        if err := clientState.Update(d.Object); err != nil {
                            return err
                        }
                        h.OnUpdate(old, d.Object)
                    } else {
                        if err := clientState.Add(d.Object); err != nil {
                            return err
                        }
                        h.OnAdd(d.Object)
                    }
                case Deleted:
                    if err := clientState.Delete(d.Object); err != nil {
                        return err
                    }
                    h.OnDelete(d.Object)
                }
            }
            return nil
        },
    }
    return clientState, New(cfg)
}
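A hedged end-to-end sketch of NewInformer (not part of this commit): the clientset plumbing, fields.Everything(), and the v1.Pod type come from other client-go/apimachinery packages and are assumptions here.

    lw := NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", "default", fields.Everything())
    store, ctrl := NewInformer(lw, &v1.Pod{}, 30*time.Second, ResourceEventHandlerFuncs{
        AddFunc:    func(obj interface{}) { fmt.Println("pod added") },
        DeleteFunc: func(obj interface{}) { fmt.Println("pod deleted") },
    })
    stop := make(chan struct{})
    defer close(stop)
    go ctrl.Run(stop) // Run blocks; the Store stays up to date while it runs.
    _ = store         // use only for Get/List, per the comment above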

// NewIndexerInformer returns an Indexer and a controller for populating the index
// while also providing event notifications. You should only use the returned
// Index for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
//  * lw is list and watch functions for the source of the resource you want to
//    be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//  * indexers is the indexer for the received object type.
//
func NewIndexerInformer(
    lw ListerWatcher,
    objType runtime.Object,
    resyncPeriod time.Duration,
    h ResourceEventHandler,
    indexers Indexers,
) (Indexer, Controller) {
    // This will hold the client state, as we know it.
    clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)

    // This will hold incoming changes. Note how we pass clientState in as a
    // KeyLister, that way resync operations will result in the correct set
    // of update/delete deltas.
    fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)

    cfg := &Config{
        Queue:            fifo,
        ListerWatcher:    lw,
        ObjectType:       objType,
        FullResyncPeriod: resyncPeriod,
        RetryOnError:     false,

        Process: func(obj interface{}) error {
            // from oldest to newest
            for _, d := range obj.(Deltas) {
                switch d.Type {
                case Sync, Added, Updated:
                    if old, exists, err := clientState.Get(d.Object); err == nil && exists {
                        if err := clientState.Update(d.Object); err != nil {
                            return err
                        }
                        h.OnUpdate(old, d.Object)
                    } else {
                        if err := clientState.Add(d.Object); err != nil {
                            return err
                        }
                        h.OnAdd(d.Object)
                    }
                case Deleted:
                    if err := clientState.Delete(d.Object); err != nil {
                        return err
                    }
                    h.OnDelete(d.Object)
                }
            }
            return nil
        },
    }
    return clientState, New(cfg)
}
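A short sketch of the indexed variant (same assumptions as the NewInformer sketch above, plus the namespace index helper that client-go ships in this package):

    indexer, ctrl := NewIndexerInformer(lw, &v1.Pod{}, 0, ResourceEventHandlerFuncs{}, Indexers{
        "namespace": MetaNamespaceIndexFunc, // index cached objects by namespace
    })
    go ctrl.Run(stop)
    pods, _ := indexer.ByIndex("namespace", "default") // all cached pods in "default"
    _ = pods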
@@ -0,0 +1,681 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "errors"
    "fmt"
    "sync"

    "k8s.io/apimachinery/pkg/util/sets"

    "github.com/golang/glog"
)

// NewDeltaFIFO returns a Store which can be used to process changes to items.
//
// keyFunc is used to figure out what key an object should have. (It's
// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.)
//
// 'compressor' may compress as many or as few items as it wants
// (including returning an empty slice), but it should do what it
// does quickly since it is called while the queue is locked.
// 'compressor' may be nil if you don't want any delta compression.
//
// 'keyLister' is expected to return a list of keys that the consumer of
// this queue "knows about". It is used to decide which items are missing
// when Replace() is called; 'Deleted' deltas are produced for these items.
// It may be nil if you don't need to detect all deletions.
// TODO: consider merging keyLister with this object, tracking a list of
//       "known" keys when Pop() is called. Have to think about how that
//       affects error retrying.
// TODO(lavalamp): I believe there is a possible race only when using an
//                 external known object source that the above TODO would
//                 fix.
//
// Also see the comment on DeltaFIFO.
func NewDeltaFIFO(keyFunc KeyFunc, compressor DeltaCompressor, knownObjects KeyListerGetter) *DeltaFIFO {
    f := &DeltaFIFO{
        items:           map[string]Deltas{},
        queue:           []string{},
        keyFunc:         keyFunc,
        deltaCompressor: compressor,
        knownObjects:    knownObjects,
    }
    f.cond.L = &f.lock
    return f
}
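A construction sketch for the queue defined below (somePod is a hypothetical object with ObjectMeta, so MetaNamespaceKeyFunc can key it):

    // nil compressor and nil knownObjects: no delta compression, and deletion
    // detection during Replace() falls back to the FIFO's own contents.
    fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, nil)
    _ = fifo.Add(somePod) // enqueues an Added delta under the "namespace/name" key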

// DeltaFIFO is like FIFO, but allows you to process deletes.
//
// DeltaFIFO is a producer-consumer queue, where a Reflector is
// intended to be the producer, and the consumer is whatever calls
// the Pop() method.
//
// DeltaFIFO solves this use case:
//  * You want to process every object change (delta) at most once.
//  * When you process an object, you want to see everything
//    that's happened to it since you last processed it.
//  * You want to process the deletion of objects.
//  * You might want to periodically reprocess objects.
//
// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
// interface{} to satisfy the Store/Queue interfaces, but it
// will always return an object of type Deltas.
//
// A note on threading: If you call Pop() in parallel from multiple
// threads, you could end up with multiple threads processing slightly
// different versions of the same object.
//
// A note on the KeyLister used by the DeltaFIFO: Its main purpose is
// to list keys that are "known", for the purpose of figuring out which
// items have been deleted when Replace() or Delete() are called. The deleted
// object will be included in the DeletedFinalStateUnknown markers. These objects
// could be stale.
//
// You may provide a function to compress deltas (e.g., represent a
// series of Updates as a single Update).
type DeltaFIFO struct {
    // lock/cond protects access to 'items' and 'queue'.
    lock sync.RWMutex
    cond sync.Cond

    // We depend on the property that items in the set are in
    // the queue and vice versa, and that all Deltas in this
    // map have at least one Delta.
    items map[string]Deltas
    queue []string

    // populated is true if the first batch of items inserted by Replace() has been populated
    // or Delete/Add/Update was called first.
    populated bool
    // initialPopulationCount is the number of items inserted by the first call of Replace()
    initialPopulationCount int

    // keyFunc is used to make the key used for queued item
    // insertion and retrieval, and should be deterministic.
    keyFunc KeyFunc

    // deltaCompressor tells us how to combine two or more
    // deltas. It may be nil.
    deltaCompressor DeltaCompressor

    // knownObjects lists keys that are "known", for the
    // purpose of figuring out which items have been deleted
    // when Replace() or Delete() is called.
    knownObjects KeyListerGetter

    // Indication the queue is closed.
    // Used to indicate a queue is closed so a control loop can exit when a queue is empty.
    // Currently, not used to gate any of CRUD operations.
    closed     bool
    closedLock sync.Mutex
}

var (
    _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
)

var (
    // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas
    // object with zero length is encountered (should be impossible,
    // even if such an object is accidentally produced by a DeltaCompressor--
    // but included for completeness).
    ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key")
)

// Close the queue.
func (f *DeltaFIFO) Close() {
    f.closedLock.Lock()
    defer f.closedLock.Unlock()
    f.closed = true
    f.cond.Broadcast()
}

// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or
// DeletedFinalStateUnknown objects.
func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
    if d, ok := obj.(Deltas); ok {
        if len(d) == 0 {
            return "", KeyError{obj, ErrZeroLengthDeltasObject}
        }
        obj = d.Newest().Object
    }
    if d, ok := obj.(DeletedFinalStateUnknown); ok {
        return d.Key, nil
    }
    return f.keyFunc(obj)
}

// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent was called first,
// or if the first batch of items inserted by Replace() has been popped.
func (f *DeltaFIFO) HasSynced() bool {
    f.lock.Lock()
    defer f.lock.Unlock()
    return f.populated && f.initialPopulationCount == 0
}

// Add inserts an item, and puts it in the queue. The item is only enqueued
// if it doesn't already exist in the set.
func (f *DeltaFIFO) Add(obj interface{}) error {
    f.lock.Lock()
    defer f.lock.Unlock()
    f.populated = true
    return f.queueActionLocked(Added, obj)
}

// Update is just like Add, but makes an Updated Delta.
func (f *DeltaFIFO) Update(obj interface{}) error {
    f.lock.Lock()
    defer f.lock.Unlock()
    f.populated = true
    return f.queueActionLocked(Updated, obj)
}

// Delete is just like Add, but makes a Deleted Delta. If the item does not
// already exist, it will be ignored. (It may have already been deleted by a
// Replace (re-list), for example.)
func (f *DeltaFIFO) Delete(obj interface{}) error {
    id, err := f.KeyOf(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    f.lock.Lock()
    defer f.lock.Unlock()
    f.populated = true
    if f.knownObjects == nil {
        if _, exists := f.items[id]; !exists {
            // Presumably, this was deleted when a relist happened.
            // Don't provide a second report of the same deletion.
            return nil
        }
    } else {
        // We only want to skip the "deletion" action if the object doesn't
        // exist in knownObjects and it doesn't have a corresponding item in items.
        // Note that even if there is a "deletion" action in items, we can ignore it,
        // because it will be deduped automatically in "queueActionLocked".
        _, exists, err := f.knownObjects.GetByKey(id)
        _, itemsExist := f.items[id]
        if err == nil && !exists && !itemsExist {
            // Presumably, this was deleted when a relist happened.
            // Don't provide a second report of the same deletion.
            // TODO(lavalamp): This may be racy-- we aren't properly locked
            // with knownObjects.
            return nil
        }
    }

    return f.queueActionLocked(Deleted, obj)
}

// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
// present in the set, it is neither enqueued nor added to the set.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
//
// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is
// different from the Add/Update/Delete functions.
func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
    deltas, ok := obj.(Deltas)
    if !ok {
        return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
    }
    id, err := f.KeyOf(deltas.Newest().Object)
    if err != nil {
        return KeyError{obj, err}
    }
    f.lock.Lock()
    defer f.lock.Unlock()
    f.addIfNotPresent(id, deltas)
    return nil
}

// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller
// already holds the fifo lock.
func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) {
    f.populated = true
    if _, exists := f.items[id]; exists {
        return
    }

    f.queue = append(f.queue, id)
    f.items[id] = deltas
    f.cond.Broadcast()
}

// Re-listing and watching can deliver the same update multiple times in any
// order. This will combine the most recent two deltas if they are the same.
func dedupDeltas(deltas Deltas) Deltas {
    n := len(deltas)
    if n < 2 {
        return deltas
    }
    a := &deltas[n-1]
    b := &deltas[n-2]
    if out := isDup(a, b); out != nil {
        d := append(Deltas{}, deltas[:n-2]...)
        return append(d, *out)
    }
    return deltas
}

// isDup returns the delta that ought to be kept if a & b represent the same event.
// Otherwise, it returns nil.
// TODO: is there anything other than deletions that need deduping?
func isDup(a, b *Delta) *Delta {
    if out := isDeletionDup(a, b); out != nil {
        return out
    }
    // TODO: Detect other duplicate situations? Are there any?
    return nil
}

// isDeletionDup keeps the delta with the most information if both are deletions.
func isDeletionDup(a, b *Delta) *Delta {
    if b.Type != Deleted || a.Type != Deleted {
        return nil
    }
    // Do more sophisticated checks, or is this sufficient?
    if _, ok := b.Object.(DeletedFinalStateUnknown); ok {
        return a
    }
    return b
}

// willObjectBeDeletedLocked returns true only if the last delta for the
// given object is Delete. Caller must lock first.
func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
    deltas := f.items[id]
    return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
}

// queueActionLocked appends to the delta list for the object, calling
// f.deltaCompressor if needed. Caller must lock first.
func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
    id, err := f.KeyOf(obj)
    if err != nil {
        return KeyError{obj, err}
    }

    // If object is supposed to be deleted (last event is Deleted),
    // then we should ignore Sync events, because it would result in
    // recreation of this object.
    if actionType == Sync && f.willObjectBeDeletedLocked(id) {
        return nil
    }

    newDeltas := append(f.items[id], Delta{actionType, obj})
    newDeltas = dedupDeltas(newDeltas)
    if f.deltaCompressor != nil {
        newDeltas = f.deltaCompressor.Compress(newDeltas)
    }

    _, exists := f.items[id]
    if len(newDeltas) > 0 {
        if !exists {
            f.queue = append(f.queue, id)
        }
        f.items[id] = newDeltas
        f.cond.Broadcast()
    } else if exists {
        // The compression step removed all deltas, so
        // we need to remove this from our map (extra items
        // in the queue are ignored if they are not in the
        // map).
        delete(f.items, id)
    }
    return nil
}

// List returns a list of all the items; it returns the object
// from the most recent Delta.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) List() []interface{} {
    f.lock.RLock()
    defer f.lock.RUnlock()
    return f.listLocked()
}

func (f *DeltaFIFO) listLocked() []interface{} {
    list := make([]interface{}, 0, len(f.items))
    for _, item := range f.items {
        // Copy item's slice so operations on this slice (delta
        // compression) won't interfere with the object we return.
        item = copyDeltas(item)
        list = append(list, item.Newest().Object)
    }
    return list
}

// ListKeys returns a list of all the keys of the objects currently
// in the FIFO.
func (f *DeltaFIFO) ListKeys() []string {
    f.lock.RLock()
    defer f.lock.RUnlock()
    list := make([]string, 0, len(f.items))
    for key := range f.items {
        list = append(list, key)
    }
    return list
}

// Get returns the complete list of deltas for the requested item,
// or sets exists=false.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
    key, err := f.KeyOf(obj)
    if err != nil {
        return nil, false, KeyError{obj, err}
    }
    return f.GetByKey(key)
}

// GetByKey returns the complete list of deltas for the requested item,
// setting exists=false if that list is empty.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
    f.lock.RLock()
    defer f.lock.RUnlock()
    d, exists := f.items[key]
    if exists {
        // Copy item's slice so operations on this slice (delta
        // compression) won't interfere with the object we return.
        d = copyDeltas(d)
    }
    return d, exists, nil
}

// IsClosed checks if the queue is closed.
func (f *DeltaFIFO) IsClosed() bool {
    f.closedLock.Lock()
    defer f.closedLock.Unlock()
    return f.closed
}

// Pop blocks until an item is added to the queue, and then returns it. If
// multiple items are ready, they are returned in the order in which they were
// added/updated. The item is removed from the queue (and the store) before it
// is returned, so if you don't successfully process it, you need to add it back
// with AddIfNotPresent().
// The process function is called under lock, so it is safe to update data structures
// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
// may return an instance of ErrRequeue with a nested error to indicate the current
// item should be requeued (equivalent to calling AddIfNotPresent under the lock).
//
// Pop returns a 'Deltas', which has a complete list of all the things
// that happened to the object (deltas) while it was sitting in the queue.
func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
    f.lock.Lock()
    defer f.lock.Unlock()
    for {
        for len(f.queue) == 0 {
            // When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
            // When Close() is called, f.closed is set and the condition is broadcast,
            // which causes this loop to continue and return from Pop().
            if f.IsClosed() {
                return nil, FIFOClosedError
            }

            f.cond.Wait()
        }
        id := f.queue[0]
        f.queue = f.queue[1:]
        item, ok := f.items[id]
        if f.initialPopulationCount > 0 {
            f.initialPopulationCount--
        }
        if !ok {
            // Item may have been deleted subsequently.
            continue
        }
        delete(f.items, id)
        err := process(item)
        if e, ok := err.(ErrRequeue); ok {
            f.addIfNotPresent(id, item)
            err = e.Err
        }
        // Don't need to copyDeltas here, because we're transferring
        // ownership to the caller.
        return item, err
    }
}
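A consumer-loop sketch for the queue above (processDelta stands in for hypothetical application logic):

    for {
        _, err := fifo.Pop(func(obj interface{}) error {
            for _, d := range obj.(Deltas) {
                if err := processDelta(d); err != nil {
                    // Requeue under the lock and surface the nested error.
                    return ErrRequeue{Err: err}
                }
            }
            return nil
        })
        if err == FIFOClosedError {
            return // the queue was closed; stop consuming
        }
    }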

// Replace will delete the contents of 'f', using instead the given list.
// 'f' takes ownership of the list; you should not reference the list again
// after calling this function. f's queue is reset, too; upon return, it
// will contain the items in the list, in no particular order.
func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
    f.lock.Lock()
    defer f.lock.Unlock()
    keys := make(sets.String, len(list))

    for _, item := range list {
        key, err := f.KeyOf(item)
        if err != nil {
            return KeyError{item, err}
        }
        keys.Insert(key)
        if err := f.queueActionLocked(Sync, item); err != nil {
            return fmt.Errorf("couldn't enqueue object: %v", err)
        }
    }

    if f.knownObjects == nil {
        // Do deletion detection against our own list.
        for k, oldItem := range f.items {
            if keys.Has(k) {
                continue
            }
            var deletedObj interface{}
            if n := oldItem.Newest(); n != nil {
                deletedObj = n.Object
            }
            if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
                return err
            }
        }

        if !f.populated {
            f.populated = true
            f.initialPopulationCount = len(list)
        }

        return nil
    }

    // Detect deletions not already in the queue.
    // TODO(lavalamp): This may be racy-- we aren't properly locked
    // with knownObjects. Unproven.
    knownKeys := f.knownObjects.ListKeys()
    queuedDeletions := 0
    for _, k := range knownKeys {
        if keys.Has(k) {
            continue
        }

        deletedObj, exists, err := f.knownObjects.GetByKey(k)
        if err != nil {
            deletedObj = nil
            glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
        } else if !exists {
            deletedObj = nil
            glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
        }
        queuedDeletions++
        if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
            return err
        }
    }

    if !f.populated {
        f.populated = true
        f.initialPopulationCount = len(list) + queuedDeletions
    }

    return nil
}

// Resync will send a sync event for each item.
func (f *DeltaFIFO) Resync() error {
    f.lock.Lock()
    defer f.lock.Unlock()

    keys := f.knownObjects.ListKeys()
    for _, k := range keys {
        if err := f.syncKeyLocked(k); err != nil {
            return err
        }
    }
    return nil
}

func (f *DeltaFIFO) syncKey(key string) error {
    f.lock.Lock()
    defer f.lock.Unlock()

    return f.syncKeyLocked(key)
}

func (f *DeltaFIFO) syncKeyLocked(key string) error {
    obj, exists, err := f.knownObjects.GetByKey(key)
    if err != nil {
        glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
        return nil
    } else if !exists {
        glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
        return nil
    }

    // If we are doing Resync() and there is already an event queued for that object,
    // we ignore the Resync for it. This is to avoid the race, in which the resync
    // comes with the previous value of the object (since queueing an event for the object
    // doesn't trigger changing the underlying store <knownObjects>).
    id, err := f.KeyOf(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    if len(f.items[id]) > 0 {
        return nil
    }

    if err := f.queueActionLocked(Sync, obj); err != nil {
        return fmt.Errorf("couldn't queue object: %v", err)
    }
    return nil
}

// A KeyListerGetter is anything that knows how to list its keys and look up by key.
type KeyListerGetter interface {
    KeyLister
    KeyGetter
}

// A KeyLister is anything that knows how to list its keys.
type KeyLister interface {
    ListKeys() []string
}

// A KeyGetter is anything that knows how to get the value stored under a given key.
type KeyGetter interface {
    GetByKey(key string) (interface{}, bool, error)
}

// DeltaCompressor is an algorithm that removes redundant changes.
type DeltaCompressor interface {
    Compress(Deltas) Deltas
}

// DeltaCompressorFunc should remove redundant changes; but which changes
// are redundant depends on one's desired semantics, so this is an
// injectable function.
//
// DeltaCompressorFunc adapts a raw function to be a DeltaCompressor.
type DeltaCompressorFunc func(Deltas) Deltas

// Compress just calls dc.
func (dc DeltaCompressorFunc) Compress(d Deltas) Deltas {
    return dc(d)
}

// DeltaType is the type of a change (addition, deletion, etc.)
type DeltaType string

const (
    Added   DeltaType = "Added"
    Updated DeltaType = "Updated"
    Deleted DeltaType = "Deleted"
    // The other types are obvious. You'll get Sync deltas when:
    //  * A watch expires/errors out and a new list/watch cycle is started.
    //  * You've turned on periodic syncs.
    // (Anything that triggers DeltaFIFO's Replace() method.)
    Sync DeltaType = "Sync"
)

// Delta is the type stored by a DeltaFIFO. It tells you what change
// happened, and the object's state after* that change.
//
// [*] Unless the change is a deletion, and then you'll get the final
// state of the object before it was deleted.
type Delta struct {
    Type   DeltaType
    Object interface{}
}

// Deltas is a list of one or more 'Delta's to an individual object.
// The oldest delta is at index 0, the newest delta is the last one.
type Deltas []Delta

// Oldest is a convenience function that returns the oldest delta, or
// nil if there are no deltas.
func (d Deltas) Oldest() *Delta {
    if len(d) > 0 {
        return &d[0]
    }
    return nil
}

// Newest is a convenience function that returns the newest delta, or
// nil if there are no deltas.
func (d Deltas) Newest() *Delta {
    if n := len(d); n > 0 {
        return &d[n-1]
    }
    return nil
}

// copyDeltas returns a shallow copy of d; that is, it copies the slice but not
// the objects in the slice. This allows Get/List to return an object that we
// know won't be clobbered by a subsequent call to a delta compressor.
func copyDeltas(d Deltas) Deltas {
    d2 := make(Deltas, len(d))
    copy(d2, d)
    return d2
}

// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where
// an object was deleted but the watch deletion event was missed. In this
// case we don't know the final "resting" state of the object, so there's
// a chance the included `Obj` is stale.
type DeletedFinalStateUnknown struct {
    Key string
    Obj interface{}
}
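Consumers of delete notifications typically need to unwrap the tombstone type above; a short sketch (handleDeletedObject is hypothetical application logic):

    func handleDelete(obj interface{}) {
        if tombstone, ok := obj.(DeletedFinalStateUnknown); ok {
            // The watch missed the delete event; Key is always valid,
            // while Obj holds the last known (possibly stale) state.
            handleDeletedObject(tombstone.Key, tombstone.Obj)
            return
        }
        key, _ := MetaNamespaceKeyFunc(obj)
        handleDeletedObject(key, obj)
    }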
@@ -0,0 +1,24 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cache is a client-side caching mechanism. It is useful for
// reducing the number of server calls you'd otherwise need to make.
// Reflector watches a server and updates a Store. Two stores are provided;
// one that simply caches objects (for example, to allow a scheduler to
// list currently available nodes), and one that additionally acts as
// a FIFO queue (for example, to allow a scheduler to process incoming
// pods).
package cache // import "k8s.io/client-go/tools/cache"
@@ -0,0 +1,208 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "sync"
    "time"

    "github.com/golang/glog"
    "k8s.io/apimachinery/pkg/util/clock"
)

// ExpirationCache implements the store interface
//  1. All entries are automatically time stamped on insert
//     a. The key is computed based on the original item/keyFunc
//     b. The value inserted under that key is the timestamped item
//  2. Expiration happens lazily on read based on the expiration policy
//     a. No item can be inserted into the store while we're expiring
//        *any* item in the cache.
//  3. Time-stamps are stripped off unexpired entries before return
// Note that the ExpirationCache is inherently slower than a normal
// threadSafeStore because it takes a write lock every time it checks if
// an item has expired.
type ExpirationCache struct {
    cacheStorage     ThreadSafeStore
    keyFunc          KeyFunc
    clock            clock.Clock
    expirationPolicy ExpirationPolicy
    // expirationLock is a write lock used to guarantee that we don't clobber
    // newly inserted objects because of a stale expiration timestamp comparison
    expirationLock sync.Mutex
}

// ExpirationPolicy dictates when an object expires. Currently only abstracted out
// so unit tests don't rely on the system clock.
type ExpirationPolicy interface {
    IsExpired(obj *timestampedEntry) bool
}

// TTLPolicy implements a ttl based ExpirationPolicy.
type TTLPolicy struct {
    // >0: Expire entries with an age > ttl
    // <=0: Don't expire any entry
    Ttl time.Duration

    // Clock used to calculate ttl expiration
    Clock clock.Clock
}

// IsExpired returns true if the given object is older than the ttl, or it can't
// determine its age.
func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
    return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
}

// timestampedEntry is the only type allowed in an ExpirationCache.
type timestampedEntry struct {
    obj       interface{}
    timestamp time.Time
}

// getTimestampedEntry returns the timestampedEntry stored under the given key.
func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
    item, _ := c.cacheStorage.Get(key)
    if tsEntry, ok := item.(*timestampedEntry); ok {
        return tsEntry, true
    }
    return nil, false
}

// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
// already expired. It holds a write lock across deletion.
func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
    // Prevent all inserts from the time we deem an item as "expired" to when we
    // delete it, so an un-expired item doesn't sneak in under the same key, just
    // before the Delete.
    c.expirationLock.Lock()
    defer c.expirationLock.Unlock()
    timestampedItem, exists := c.getTimestampedEntry(key)
    if !exists {
        return nil, false
    }
    if c.expirationPolicy.IsExpired(timestampedItem) {
        glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
        c.cacheStorage.Delete(key)
        return nil, false
    }
    return timestampedItem.obj, true
}

// GetByKey returns the item stored under the key, or sets exists=false.
func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) {
    obj, exists := c.getOrExpire(key)
    return obj, exists, nil
}

// Get returns unexpired items. It purges the cache of expired items in the
// process.
func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) {
    key, err := c.keyFunc(obj)
    if err != nil {
        return nil, false, KeyError{obj, err}
    }
    obj, exists := c.getOrExpire(key)
    return obj, exists, nil
}

// List retrieves a list of unexpired items. It purges the cache of expired
// items in the process.
func (c *ExpirationCache) List() []interface{} {
    items := c.cacheStorage.List()

    list := make([]interface{}, 0, len(items))
    for _, item := range items {
        obj := item.(*timestampedEntry).obj
        if key, err := c.keyFunc(obj); err != nil {
            list = append(list, obj)
        } else if obj, exists := c.getOrExpire(key); exists {
            list = append(list, obj)
        }
    }
    return list
}

// ListKeys returns a list of all keys in the expiration cache.
func (c *ExpirationCache) ListKeys() []string {
    return c.cacheStorage.ListKeys()
}

// Add timestamps an item and inserts it into the cache, overwriting entries
// that might exist under the same key.
func (c *ExpirationCache) Add(obj interface{}) error {
    c.expirationLock.Lock()
    defer c.expirationLock.Unlock()

    key, err := c.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
    return nil
}

// Update has not been implemented yet for lack of a use case, so this method
// simply calls `Add`. This effectively refreshes the timestamp.
func (c *ExpirationCache) Update(obj interface{}) error {
    return c.Add(obj)
}

// Delete removes an item from the cache.
func (c *ExpirationCache) Delete(obj interface{}) error {
    c.expirationLock.Lock()
    defer c.expirationLock.Unlock()
    key, err := c.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    c.cacheStorage.Delete(key)
    return nil
}

// Replace will convert all items in the given list to timestampedEntry values
// before attempting the replace operation. The replace operation will
// delete the contents of the ExpirationCache `c`.
func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
    c.expirationLock.Lock()
    defer c.expirationLock.Unlock()
    items := map[string]interface{}{}
    ts := c.clock.Now()
    for _, item := range list {
        key, err := c.keyFunc(item)
        if err != nil {
            return KeyError{item, err}
        }
        items[key] = &timestampedEntry{item, ts}
    }
    c.cacheStorage.Replace(items, resourceVersion)
    return nil
}

// Resync will touch all objects to put them into the processing queue.
func (c *ExpirationCache) Resync() error {
    return c.cacheStorage.Resync()
}

// NewTTLStore creates and returns an ExpirationCache with a TTLPolicy.
func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
    return &ExpirationCache{
        cacheStorage:     NewThreadSafeStore(Indexers{}, Indices{}),
        keyFunc:          keyFunc,
        clock:            clock.RealClock{},
        expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
    }
}
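A small usage sketch of the TTL store above (somePod is a hypothetical object that MetaNamespaceKeyFunc can key):

    store := NewTTLStore(MetaNamespaceKeyFunc, 10*time.Minute)
    _ = store.Add(somePod) // entry is timestamped on insert
    items := store.List()  // reads lazily drop entries older than 10 minutes
    _ = items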
@@ -0,0 +1,54 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/apimachinery/pkg/util/sets"
)

// fakeThreadSafeMap wraps a ThreadSafeStore and reports deleted keys on a
// channel; note that Delete is a no-op when deletedKeys is nil.
type fakeThreadSafeMap struct {
    ThreadSafeStore
    deletedKeys chan<- string
}

func (c *fakeThreadSafeMap) Delete(key string) {
    if c.deletedKeys != nil {
        c.ThreadSafeStore.Delete(key)
        c.deletedKeys <- key
    }
}

// FakeExpirationPolicy is an ExpirationPolicy for tests: every entry is
// considered expired except those whose key is listed in NeverExpire.
type FakeExpirationPolicy struct {
    NeverExpire     sets.String
    RetrieveKeyFunc KeyFunc
}

// IsExpired reports whether the entry's key is absent from NeverExpire.
func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
    key, _ := p.RetrieveKeyFunc(obj)
    return !p.NeverExpire.Has(key)
}

// NewFakeExpirationStore returns an ExpirationCache wired to a fake backing
// store, the given expiration policy, and the given clock, for use in tests.
func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
    cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
    return &ExpirationCache{
        cacheStorage:     &fakeThreadSafeMap{cacheStorage, deletedKeys},
        keyFunc:          keyFunc,
        clock:            cacheClock,
        expirationPolicy: expirationPolicy,
    }
}
@@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

// FakeCustomStore lets you define custom functions for store operations.
type FakeCustomStore struct {
    AddFunc      func(obj interface{}) error
    UpdateFunc   func(obj interface{}) error
    DeleteFunc   func(obj interface{}) error
    ListFunc     func() []interface{}
    ListKeysFunc func() []string
    GetFunc      func(obj interface{}) (item interface{}, exists bool, err error)
    GetByKeyFunc func(key string) (item interface{}, exists bool, err error)
    ReplaceFunc  func(list []interface{}, resourceVersion string) error
    ResyncFunc   func() error
}

// Add calls the custom Add function if defined
func (f *FakeCustomStore) Add(obj interface{}) error {
    if f.AddFunc != nil {
        return f.AddFunc(obj)
    }
    return nil
}

// Update calls the custom Update function if defined
func (f *FakeCustomStore) Update(obj interface{}) error {
    if f.UpdateFunc != nil {
        return f.UpdateFunc(obj)
    }
    return nil
}

// Delete calls the custom Delete function if defined
func (f *FakeCustomStore) Delete(obj interface{}) error {
    if f.DeleteFunc != nil {
        return f.DeleteFunc(obj)
    }
    return nil
}

// List calls the custom List function if defined
func (f *FakeCustomStore) List() []interface{} {
    if f.ListFunc != nil {
        return f.ListFunc()
    }
    return nil
}

// ListKeys calls the custom ListKeys function if defined
func (f *FakeCustomStore) ListKeys() []string {
    if f.ListKeysFunc != nil {
        return f.ListKeysFunc()
    }
    return nil
}

// Get calls the custom Get function if defined
func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) {
    if f.GetFunc != nil {
        return f.GetFunc(obj)
    }
    return nil, false, nil
}

// GetByKey calls the custom GetByKey function if defined
func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) {
    if f.GetByKeyFunc != nil {
        return f.GetByKeyFunc(key)
    }
    return nil, false, nil
}

// Replace calls the custom Replace function if defined
func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error {
    if f.ReplaceFunc != nil {
        return f.ReplaceFunc(list, resourceVersion)
    }
    return nil
}

// Resync calls the custom Resync function if defined
func (f *FakeCustomStore) Resync() error {
    if f.ResyncFunc != nil {
        return f.ResyncFunc()
    }
    return nil
}
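A sketch of using the fake in a test (the stubbed value is arbitrary):

    fake := &FakeCustomStore{
        GetByKeyFunc: func(key string) (interface{}, bool, error) {
            return "stub-object-for-" + key, true, nil // pretend every lookup hits
        },
    }
    obj, exists, _ := fake.GetByKey("default/example")
    fmt.Println(obj, exists) // stub-object-for-default/example true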
@@ -0,0 +1,358 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "errors"
    "sync"

    "k8s.io/apimachinery/pkg/util/sets"
)

// PopProcessFunc is passed to the Pop() method of the Queue interface.
// It is supposed to process the element popped from the queue.
type PopProcessFunc func(interface{}) error

// ErrRequeue may be returned by a PopProcessFunc to safely requeue
// the current item. The value of Err will be returned from Pop.
type ErrRequeue struct {
    // Err is returned by the Pop function
    Err error
}

// FIFOClosedError is returned by Pop when it is called on a closed queue.
var FIFOClosedError = errors.New("DeltaFIFO: manipulating with closed queue")

func (e ErrRequeue) Error() string {
    if e.Err == nil {
        return "the popped item should be requeued without returning an error"
    }
    return e.Err.Error()
}

// Queue is exactly like a Store, but has a Pop() method too.
type Queue interface {
    Store

    // Pop blocks until it has something to process.
    // It returns the object that was processed and the result of processing.
    // The PopProcessFunc may return an ErrRequeue{...} to indicate the item
    // should be requeued before releasing the lock on the queue.
    Pop(PopProcessFunc) (interface{}, error)

    // AddIfNotPresent adds a value previously
    // returned by Pop back into the queue as long
    // as nothing else (presumably more recent)
    // has since been added.
    AddIfNotPresent(interface{}) error

    // HasSynced returns true if the first batch of items has been popped
    HasSynced() bool

    // Close the queue
    Close()
}

// Pop is a helper function for popping from a Queue.
// WARNING: Do NOT use this function in non-test code to avoid races
// unless you really really really really know what you are doing.
func Pop(queue Queue) interface{} {
    var result interface{}
    queue.Pop(func(obj interface{}) error {
        result = obj
        return nil
    })
    return result
}
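Per the warning above, the package-level Pop helper is for tests only; a sketch (NewFIFO is the FIFO constructor assumed from later in this file, and someObj is hypothetical):

    f := NewFIFO(MetaNamespaceKeyFunc)
    _ = f.Add(someObj)
    got := Pop(f) // synchronously drains one item; racy outside tests
    _ = got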

// FIFO receives adds and updates from a Reflector, and puts them in a queue for
// FIFO order processing. If multiple adds/updates of a single item happen while
// an item is in the queue before it has been processed, it will only be
// processed once, and when it is processed, the most recent version will be
// processed. This can't be done with a channel.
//
// FIFO solves this use case:
//  * You want to process every object (exactly) once.
//  * You want to process the most recent version of the object when you process it.
//  * You do not want to process deleted objects, they should be removed from the queue.
//  * You do not want to periodically reprocess objects.
// Compare with DeltaFIFO for other use cases.
type FIFO struct {
    lock sync.RWMutex
    cond sync.Cond
    // We depend on the property that items in the set are in the queue and vice versa.
    items map[string]interface{}
    queue []string

    // populated is true if the first batch of items inserted by Replace() has been populated
    // or Delete/Add/Update was called first.
    populated bool
    // initialPopulationCount is the number of items inserted by the first call of Replace()
    initialPopulationCount int

    // keyFunc is used to make the key used for queued item insertion and retrieval, and
    // should be deterministic.
    keyFunc KeyFunc

    // Indication the queue is closed.
    // Used to indicate a queue is closed so a control loop can exit when a queue is empty.
    // Currently, not used to gate any of CRUD operations.
    closed     bool
    closedLock sync.Mutex
}

var (
    _ = Queue(&FIFO{}) // FIFO is a Queue
)

// Close the queue.
func (f *FIFO) Close() {
    f.closedLock.Lock()
    defer f.closedLock.Unlock()
    f.closed = true
    f.cond.Broadcast()
}

// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent was called first,
// or if the first batch of items inserted by Replace() has been popped.
func (f *FIFO) HasSynced() bool {
    f.lock.Lock()
    defer f.lock.Unlock()
    return f.populated && f.initialPopulationCount == 0
}

// Add inserts an item, and puts it in the queue. The item is only enqueued
// if it doesn't already exist in the set.
func (f *FIFO) Add(obj interface{}) error {
    id, err := f.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    f.lock.Lock()
    defer f.lock.Unlock()
    f.populated = true
    if _, exists := f.items[id]; !exists {
        f.queue = append(f.queue, id)
    }
    f.items[id] = obj
    f.cond.Broadcast()
    return nil
}

// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
// present in the set, it is neither enqueued nor added to the set.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
func (f *FIFO) AddIfNotPresent(obj interface{}) error {
    id, err := f.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    f.lock.Lock()
    defer f.lock.Unlock()
    f.addIfNotPresent(id, obj)
    return nil
}

// addIfNotPresent assumes the fifo lock is already held and adds the provided
// item to the queue under id if it does not already exist.
func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
    f.populated = true
    if _, exists := f.items[id]; exists {
        return
    }

    f.queue = append(f.queue, id)
    f.items[id] = obj
    f.cond.Broadcast()
}

// Update is the same as Add in this implementation.
func (f *FIFO) Update(obj interface{}) error {
    return f.Add(obj)
}

// Delete removes an item. It doesn't add it to the queue, because
// this implementation assumes the consumer only cares about the objects,
// not the order in which they were created/added.
func (f *FIFO) Delete(obj interface{}) error {
    id, err := f.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    f.lock.Lock()
    defer f.lock.Unlock()
    f.populated = true
    delete(f.items, id)
    return err
}

// List returns a list of all the items.
func (f *FIFO) List() []interface{} {
    f.lock.RLock()
    defer f.lock.RUnlock()
    list := make([]interface{}, 0, len(f.items))
    for _, item := range f.items {
||||
list = append(list, item)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// ListKeys returns a list of all the keys of the objects currently
|
||||
// in the FIFO.
|
||||
func (f *FIFO) ListKeys() []string {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
list := make([]string, 0, len(f.items))
|
||||
for key := range f.items {
|
||||
list = append(list, key)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// Get returns the requested item, or sets exists=false.
|
||||
func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
|
||||
key, err := f.keyFunc(obj)
|
||||
if err != nil {
|
||||
return nil, false, KeyError{obj, err}
|
||||
}
|
||||
return f.GetByKey(key)
|
||||
}
|
||||
|
||||
// GetByKey returns the requested item, or sets exists=false.
|
||||
func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
item, exists = f.items[key]
|
||||
return item, exists, nil
|
||||
}
|
||||
|
||||
// Checks if the queue is closed
|
||||
func (f *FIFO) IsClosed() bool {
|
||||
f.closedLock.Lock()
|
||||
defer f.closedLock.Unlock()
|
||||
if f.closed {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Pop waits until an item is ready and processes it. If multiple items are
|
||||
// ready, they are returned in the order in which they were added/updated.
|
||||
// The item is removed from the queue (and the store) before it is processed,
|
||||
// so if you don't successfully process it, it should be added back with
|
||||
// AddIfNotPresent(). process function is called under lock, so it is safe
|
||||
// update data structures in it that need to be in sync with the queue.
|
||||
func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
for {
|
||||
for len(f.queue) == 0 {
|
||||
// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
|
||||
// When Close() is called, the f.closed is set and the condition is broadcasted.
|
||||
// Which causes this loop to continue and return from the Pop().
|
||||
if f.IsClosed() {
|
||||
return nil, FIFOClosedError
|
||||
}
|
||||
|
||||
f.cond.Wait()
|
||||
}
|
||||
id := f.queue[0]
|
||||
f.queue = f.queue[1:]
|
||||
if f.initialPopulationCount > 0 {
|
||||
f.initialPopulationCount--
|
||||
}
|
||||
item, ok := f.items[id]
|
||||
if !ok {
|
||||
// Item may have been deleted subsequently.
|
||||
continue
|
||||
}
|
||||
delete(f.items, id)
|
||||
err := process(item)
|
||||
if e, ok := err.(ErrRequeue); ok {
|
||||
f.addIfNotPresent(id, item)
|
||||
err = e.Err
|
||||
}
|
||||
return item, err
|
||||
}
|
||||
}
|
||||
|
||||
// Replace will delete the contents of 'f', using instead the given map.
|
||||
// 'f' takes ownership of the map, you should not reference the map again
|
||||
// after calling this function. f's queue is reset, too; upon return, it
|
||||
// will contain the items in the map, in no particular order.
|
||||
func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
|
||||
items := map[string]interface{}{}
|
||||
for _, item := range list {
|
||||
key, err := f.keyFunc(item)
|
||||
if err != nil {
|
||||
return KeyError{item, err}
|
||||
}
|
||||
items[key] = item
|
||||
}
|
||||
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if !f.populated {
|
||||
f.populated = true
|
||||
f.initialPopulationCount = len(items)
|
||||
}
|
||||
|
||||
f.items = items
|
||||
f.queue = f.queue[:0]
|
||||
for id := range items {
|
||||
f.queue = append(f.queue, id)
|
||||
}
|
||||
if len(f.queue) > 0 {
|
||||
f.cond.Broadcast()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resync will touch all objects to put them into the processing queue
|
||||
func (f *FIFO) Resync() error {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
inQueue := sets.NewString()
|
||||
for _, id := range f.queue {
|
||||
inQueue.Insert(id)
|
||||
}
|
||||
for id := range f.items {
|
||||
if !inQueue.Has(id) {
|
||||
f.queue = append(f.queue, id)
|
||||
}
|
||||
}
|
||||
if len(f.queue) > 0 {
|
||||
f.cond.Broadcast()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFIFO returns a Store which can be used to queue up items to
|
||||
// process.
|
||||
func NewFIFO(keyFunc KeyFunc) *FIFO {
|
||||
f := &FIFO{
|
||||
items: map[string]interface{}{},
|
||||
queue: []string{},
|
||||
keyFunc: keyFunc,
|
||||
}
|
||||
f.cond.L = &f.lock
|
||||
return f
|
||||
}
|
|
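
// Example (editor's sketch, not part of the vendored file): basic FIFO usage
// with a toy key function where the object (a string) is its own key;
// Kubernetes objects would normally use MetaNamespaceKeyFunc instead.
func exampleFIFO() {
	f := NewFIFO(func(obj interface{}) (string, error) {
		return obj.(string), nil
	})
	f.Add("pod-a")
	f.Add("pod-b")
	f.Add("pod-a") // re-adding an existing key updates it without enqueueing twice
	for len(f.ListKeys()) > 0 {
		f.Pop(func(obj interface{}) error {
			return nil // real consumers process obj here, under the queue lock
		})
	}
}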
@ -0,0 +1,323 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file implements a heap data structure.

package cache

import (
	"container/heap"
	"fmt"
	"sync"
)

const (
	closedMsg = "heap is closed"
)

// LessFunc compares two objects and returns true if the first one should go
// in front of the second one in the heap.
type LessFunc func(interface{}, interface{}) bool

type heapItem struct {
	obj   interface{} // The object which is stored in the heap.
	index int         // The index of the object's key in the Heap.queue.
}

type itemKeyValue struct {
	key string
	obj interface{}
}

// heapData is an internal struct that implements the standard heap interface
// and keeps the data stored in the heap.
type heapData struct {
	// items is a map from key of the objects to the objects and their index.
	// We depend on the property that items in the map are in the queue and vice versa.
	items map[string]*heapItem
	// queue implements a heap data structure and keeps the order of elements
	// according to the heap invariant. The queue keeps the keys of objects stored
	// in "items".
	queue []string

	// keyFunc is used to make the key used for queued item insertion and retrieval, and
	// should be deterministic.
	keyFunc KeyFunc
	// lessFunc is used to compare two objects in the heap.
	lessFunc LessFunc
}

var (
	_ = heap.Interface(&heapData{}) // heapData is a standard heap
)

// Less compares two objects and returns true if the first one should go
// in front of the second one in the heap.
func (h *heapData) Less(i, j int) bool {
	// Note: the bounds check uses >= so that i == len(h.queue) is rejected
	// rather than indexing out of range.
	if i >= len(h.queue) || j >= len(h.queue) {
		return false
	}
	itemi, ok := h.items[h.queue[i]]
	if !ok {
		return false
	}
	itemj, ok := h.items[h.queue[j]]
	if !ok {
		return false
	}
	return h.lessFunc(itemi.obj, itemj.obj)
}

// Len returns the number of items in the Heap.
func (h *heapData) Len() int { return len(h.queue) }

// Swap implements swapping of two elements in the heap. This is a part of standard
// heap interface and should never be called directly.
func (h *heapData) Swap(i, j int) {
	h.queue[i], h.queue[j] = h.queue[j], h.queue[i]
	item := h.items[h.queue[i]]
	item.index = i
	item = h.items[h.queue[j]]
	item.index = j
}

// Push is supposed to be called by heap.Push only.
func (h *heapData) Push(kv interface{}) {
	keyValue := kv.(*itemKeyValue)
	n := len(h.queue)
	h.items[keyValue.key] = &heapItem{keyValue.obj, n}
	h.queue = append(h.queue, keyValue.key)
}

// Pop is supposed to be called by heap.Pop only.
func (h *heapData) Pop() interface{} {
	key := h.queue[len(h.queue)-1]
	h.queue = h.queue[0 : len(h.queue)-1]
	item, ok := h.items[key]
	if !ok {
		// This is an error
		return nil
	}
	delete(h.items, key)
	return item.obj
}

// Heap is a thread-safe producer/consumer queue that implements a heap data structure.
// It can be used to implement priority queues and similar data structures.
type Heap struct {
	lock sync.RWMutex
	cond sync.Cond

	// data stores objects and has a queue that keeps their ordering according
	// to the heap invariant.
	data *heapData

	// closed indicates that the queue is closed.
	// It is mainly used to let Pop() exit its control loop while waiting for an item.
	closed bool
}

// Close the Heap and signals condition variables that may be waiting to pop
// items from the heap.
func (h *Heap) Close() {
	h.lock.Lock()
	defer h.lock.Unlock()
	h.closed = true
	h.cond.Broadcast()
}

// Add inserts an item, and puts it in the queue. The item is updated if it
// already exists.
func (h *Heap) Add(obj interface{}) error {
	key, err := h.data.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.closed {
		return fmt.Errorf(closedMsg)
	}
	if _, exists := h.data.items[key]; exists {
		h.data.items[key].obj = obj
		heap.Fix(h.data, h.data.items[key].index)
	} else {
		h.addIfNotPresentLocked(key, obj)
	}
	h.cond.Broadcast()
	return nil
}

// BulkAdd adds all the items in the list to the queue and then signals the condition
// variable. It is useful when the caller would like to add all of the items
// to the queue before the consumer starts processing them.
func (h *Heap) BulkAdd(list []interface{}) error {
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.closed {
		return fmt.Errorf(closedMsg)
	}
	for _, obj := range list {
		key, err := h.data.keyFunc(obj)
		if err != nil {
			return KeyError{obj, err}
		}
		if _, exists := h.data.items[key]; exists {
			h.data.items[key].obj = obj
			heap.Fix(h.data, h.data.items[key].index)
		} else {
			h.addIfNotPresentLocked(key, obj)
		}
	}
	h.cond.Broadcast()
	return nil
}

// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
// the key is present in the map, no change is made to the item.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
func (h *Heap) AddIfNotPresent(obj interface{}) error {
	id, err := h.data.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.closed {
		return fmt.Errorf(closedMsg)
	}
	h.addIfNotPresentLocked(id, obj)
	h.cond.Broadcast()
	return nil
}

// addIfNotPresentLocked assumes the lock is already held and adds the provided
// item to the queue if it does not already exist.
func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) {
	if _, exists := h.data.items[key]; exists {
		return
	}
	heap.Push(h.data, &itemKeyValue{key, obj})
}

// Update is the same as Add in this implementation. When the item does not
// exist, it is added.
func (h *Heap) Update(obj interface{}) error {
	return h.Add(obj)
}

// Delete removes an item.
func (h *Heap) Delete(obj interface{}) error {
	key, err := h.data.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	h.lock.Lock()
	defer h.lock.Unlock()
	if item, ok := h.data.items[key]; ok {
		heap.Remove(h.data, item.index)
		return nil
	}
	return fmt.Errorf("object not found")
}

// Pop waits until an item is ready. If multiple items are
// ready, they are returned in the order given by Heap.data.lessFunc.
func (h *Heap) Pop() (interface{}, error) {
	h.lock.Lock()
	defer h.lock.Unlock()
	for len(h.data.queue) == 0 {
		// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
		// When Close() is called, the h.closed is set and the condition is broadcast,
		// which causes this loop to continue and return from the Pop().
		if h.closed {
			return nil, fmt.Errorf(closedMsg)
		}
		h.cond.Wait()
	}
	obj := heap.Pop(h.data)
	if obj == nil {
		return nil, fmt.Errorf("object was removed from heap data")
	}
	return obj, nil
}

// List returns a list of all the items.
func (h *Heap) List() []interface{} {
	h.lock.RLock()
	defer h.lock.RUnlock()
	list := make([]interface{}, 0, len(h.data.items))
	for _, item := range h.data.items {
		list = append(list, item.obj)
	}
	return list
}

// ListKeys returns a list of all the keys of the objects currently in the Heap.
func (h *Heap) ListKeys() []string {
	h.lock.RLock()
	defer h.lock.RUnlock()
	list := make([]string, 0, len(h.data.items))
	for key := range h.data.items {
		list = append(list, key)
	}
	return list
}

// Get returns the requested item, or sets exists=false.
func (h *Heap) Get(obj interface{}) (interface{}, bool, error) {
	key, err := h.data.keyFunc(obj)
	if err != nil {
		return nil, false, KeyError{obj, err}
	}
	return h.GetByKey(key)
}

// GetByKey returns the requested item, or sets exists=false.
func (h *Heap) GetByKey(key string) (interface{}, bool, error) {
	h.lock.RLock()
	defer h.lock.RUnlock()
	item, exists := h.data.items[key]
	if !exists {
		return nil, false, nil
	}
	return item.obj, true, nil
}

// IsClosed returns true if the queue is closed.
func (h *Heap) IsClosed() bool {
	h.lock.RLock()
	defer h.lock.RUnlock()
	if h.closed {
		return true
	}
	return false
}

// NewHeap returns a Heap which can be used to queue up items to process.
func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap {
	h := &Heap{
		data: &heapData{
			items:    map[string]*heapItem{},
			queue:    []string{},
			keyFunc:  keyFn,
			lessFunc: lessFn,
		},
	}
	h.cond.L = &h.lock
	return h
}
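
// Example (editor's sketch, not part of the vendored file): using Heap as a
// min-priority queue of integers; the key and less functions are toy values.
func exampleHeap() (interface{}, error) {
	h := NewHeap(
		func(obj interface{}) (string, error) { return fmt.Sprintf("%d", obj.(int)), nil },
		func(a, b interface{}) bool { return a.(int) < b.(int) }, // min-heap ordering
	)
	if err := h.BulkAdd([]interface{}{3, 1, 2}); err != nil {
		return nil, err
	}
	return h.Pop() // pops 1, the smallest item
}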
@ -0,0 +1,87 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/util/sets"
)

// Indexer is a storage interface that lets you list objects using multiple indexing functions
type Indexer interface {
	Store
	// Index returns the list of objects that match on the named indexing function
	Index(indexName string, obj interface{}) ([]interface{}, error)
	// IndexKeys returns the set of keys that match on the named indexing function.
	IndexKeys(indexName, indexKey string) ([]string, error)
	// ListIndexFuncValues returns the list of generated values of an Index func
	ListIndexFuncValues(indexName string) []string
	// ByIndex lists objects that match on the named indexing function with the exact key
	ByIndex(indexName, indexKey string) ([]interface{}, error)
	// GetIndexers returns the indexers
	GetIndexers() Indexers

	// AddIndexers adds more indexers to this store. If you call this after you already have data
	// in the store, the results are undefined.
	AddIndexers(newIndexers Indexers) error
}

// IndexFunc knows how to provide an indexed value for an object.
type IndexFunc func(obj interface{}) ([]string, error)

// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns
// unique values for every object. This conversion can create errors when more than one key is found. You
// should prefer to make proper key and index functions.
func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
	return func(obj interface{}) (string, error) {
		indexKeys, err := indexFunc(obj)
		if err != nil {
			return "", err
		}
		if len(indexKeys) > 1 {
			return "", fmt.Errorf("too many keys: %v", indexKeys)
		}
		if len(indexKeys) == 0 {
			return "", fmt.Errorf("unexpected empty indexKeys")
		}
		return indexKeys[0], nil
	}
}

const (
	// NamespaceIndex is the name of the index that keys objects by namespace.
	NamespaceIndex string = "namespace"
)

// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace
func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
	meta, err := meta.Accessor(obj)
	if err != nil {
		return []string{""}, fmt.Errorf("object has no meta: %v", err)
	}
	return []string{meta.GetNamespace()}, nil
}

// Index maps the indexed value to a set of keys in the store that match on that value
type Index map[string]sets.String

// Indexers maps a name to an IndexFunc
type Indexers map[string]IndexFunc

// Indices maps a name to an Index
type Indices map[string]Index
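
// Example (editor's sketch, not part of the vendored file): building a
// namespace index. NewIndexer and MetaNamespaceKeyFunc are defined elsewhere
// in this package.
func exampleNamespaceIndex() ([]interface{}, error) {
	indexer := NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
	// After objects are stored with indexer.Add(obj), everything in one
	// namespace can be listed without scanning the whole store.
	return indexer.ByIndex(NamespaceIndex, "kube-system")
}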
@ -0,0 +1,160 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// AppendFunc is used to add a matching item to whatever list the caller is using
type AppendFunc func(interface{})

// ListAll calls appendFn with each item in the store whose labels match the selector.
func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
	for _, m := range store.List() {
		metadata, err := meta.Accessor(m)
		if err != nil {
			return err
		}
		if selector.Matches(labels.Set(metadata.GetLabels())) {
			appendFn(m)
		}
	}
	return nil
}

// ListAllByNamespace calls appendFn with each item in the given namespace whose labels match the selector.
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
	if namespace == metav1.NamespaceAll {
		for _, m := range indexer.List() {
			metadata, err := meta.Accessor(m)
			if err != nil {
				return err
			}
			if selector.Matches(labels.Set(metadata.GetLabels())) {
				appendFn(m)
			}
		}
		return nil
	}

	items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
	if err != nil {
		// Ignore error; do slow search without index.
		glog.Warningf("cannot retrieve list of objects using index: %v", err)
		for _, m := range indexer.List() {
			metadata, err := meta.Accessor(m)
			if err != nil {
				return err
			}
			if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) {
				appendFn(m)
			}
		}
		return nil
	}
	for _, m := range items {
		metadata, err := meta.Accessor(m)
		if err != nil {
			return err
		}
		if selector.Matches(labels.Set(metadata.GetLabels())) {
			appendFn(m)
		}
	}

	return nil
}

// GenericLister is a lister skin on a generic Indexer
type GenericLister interface {
	// List will return all objects across namespaces
	List(selector labels.Selector) (ret []runtime.Object, err error)
	// Get will attempt to retrieve assuming that name==key
	Get(name string) (runtime.Object, error)
	// ByNamespace will give you a GenericNamespaceLister for one namespace
	ByNamespace(namespace string) GenericNamespaceLister
}

// GenericNamespaceLister is a lister skin on a generic Indexer
type GenericNamespaceLister interface {
	// List will return all objects in this namespace
	List(selector labels.Selector) (ret []runtime.Object, err error)
	// Get will attempt to retrieve by namespace and name
	Get(name string) (runtime.Object, error)
}

// NewGenericLister creates a GenericLister backed by the given Indexer.
func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister {
	return &genericLister{indexer: indexer, resource: resource}
}

type genericLister struct {
	indexer  Indexer
	resource schema.GroupResource
}

func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
	err = ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(runtime.Object))
	})
	return ret, err
}

func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister {
	return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource}
}

func (s *genericLister) Get(name string) (runtime.Object, error) {
	obj, exists, err := s.indexer.GetByKey(name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(s.resource, name)
	}
	return obj.(runtime.Object), nil
}

type genericNamespaceLister struct {
	indexer   Indexer
	namespace string
	resource  schema.GroupResource
}

func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
	err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(runtime.Object))
	})
	return ret, err
}

func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(s.resource, name)
	}
	return obj.(runtime.Object), nil
}
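
// Example (editor's sketch, not part of the vendored file): wrapping a
// namespace-keyed Indexer in a GenericLister and listing one namespace. The
// apps/deployments GroupResource is an illustrative value, used only in
// NotFound error messages.
func exampleGenericLister(indexer Indexer) ([]runtime.Object, error) {
	lister := NewGenericLister(indexer, schema.GroupResource{Group: "apps", Resource: "deployments"})
	return lister.ByNamespace("default").List(labels.Everything())
}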
@ -0,0 +1,173 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"time"

	"golang.org/x/net/context"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/pager"
)

// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
type ListerWatcher interface {
	// List should return a list type object; the Items field will be extracted, and the
	// ResourceVersion field will be used to start the watch in the right place.
	List(options metav1.ListOptions) (runtime.Object, error)
	// Watch should begin a watch at the specified version.
	Watch(options metav1.ListOptions) (watch.Interface, error)
}

// ListFunc knows how to list resources
type ListFunc func(options metav1.ListOptions) (runtime.Object, error)

// WatchFunc knows how to watch resources
type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)

// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface.
// It is a convenience type for users of NewReflector, etc.
// ListFunc and WatchFunc must not be nil.
type ListWatch struct {
	ListFunc  ListFunc
	WatchFunc WatchFunc
	// DisableChunking requests no chunking for this list watcher. It has no effect in Kubernetes 1.8, but in
	// 1.9 will allow a controller to opt out of chunking.
	DisableChunking bool
}

// Getter interface knows how to access Get method from RESTClient.
type Getter interface {
	Get() *restclient.Request
}

// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
		options.FieldSelector = fieldSelector.String()
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			Do().
			Get()
	}
	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
		options.Watch = true
		options.FieldSelector = fieldSelector.String()
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			Watch()
	}
	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}

func timeoutFromListOptions(options metav1.ListOptions) time.Duration {
	if options.TimeoutSeconds != nil {
		return time.Duration(*options.TimeoutSeconds) * time.Second
	}
	return 0
}

// List a set of apiserver resources
func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
	// chunking will become the default for list watchers starting in Kubernetes 1.9, unless
	// otherwise disabled.
	if false && !lw.DisableChunking {
		return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
	}
	return lw.ListFunc(options)
}

// Watch a set of apiserver resources
func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
	return lw.WatchFunc(options)
}

// ListWatchUntil lists, then watches, the given resource until each of the provided
// conditions has been satisfied or the timeout expires.
// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) {
	if len(conditions) == 0 {
		return nil, nil
	}

	list, err := lw.List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	initialItems, err := meta.ExtractList(list)
	if err != nil {
		return nil, err
	}

	// use the initial items as simulated "adds"
	var lastEvent *watch.Event
	currIndex := 0
	passedConditions := 0
	for _, condition := range conditions {
		// check the next condition against the previous event and short circuit waiting for the next watch
		if lastEvent != nil {
			done, err := condition(*lastEvent)
			if err != nil {
				return lastEvent, err
			}
			if done {
				passedConditions = passedConditions + 1
				continue
			}
		}

	ConditionSucceeded:
		for currIndex < len(initialItems) {
			lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
			currIndex++

			done, err := condition(*lastEvent)
			if err != nil {
				return lastEvent, err
			}
			if done {
				passedConditions = passedConditions + 1
				break ConditionSucceeded
			}
		}
	}
	if passedConditions == len(conditions) {
		return lastEvent, nil
	}
	remainingConditions := conditions[passedConditions:]

	metaObj, err := meta.ListAccessor(list)
	if err != nil {
		return nil, err
	}
	currResourceVersion := metaObj.GetResourceVersion()

	watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
	if err != nil {
		return nil, err
	}

	return watch.Until(timeout, watchInterface, remainingConditions...)
}
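
// Example (editor's sketch, not part of the vendored file): a ListWatch over
// pods scheduled to one node, narrowed with a field selector. The clientset
// parameter is assumed to be a k8s.io/client-go/kubernetes Interface supplied
// by the caller; its CoreV1().RESTClient() satisfies the Getter interface.
func examplePodListWatch(clientset kubernetes.Interface) *ListWatch {
	sel := fields.OneTermEqualSelector("spec.nodeName", "node-1")
	return NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, sel)
}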
@ -0,0 +1,261 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"strconv"
	"sync"
	"time"

	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	utilcache "k8s.io/apimachinery/pkg/util/cache"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
)

// MutationCache is able to take the result of update operations and stores them in an LRU
// that can be used to provide a more current view of a requested object. It requires interpreting
// resourceVersions for comparisons.
// Implementations must be thread-safe.
// TODO find a way to layer this into an informer/lister
type MutationCache interface {
	GetByKey(key string) (interface{}, bool, error)
	ByIndex(indexName, indexKey string) ([]interface{}, error)
	Mutation(interface{})
}

// ResourceVersionComparator knows how to compare the resource versions of two objects.
type ResourceVersionComparator interface {
	CompareResourceVersion(lhs, rhs runtime.Object) int
}

// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to
// deal with objects that have a resource version that:
//
// - is an integer
// - increases when updated
// - is comparable across the same resource in a namespace
//
// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item
// remains in the mutation cache before it is removed.
//
// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist
// in the underlying store. This is only safe if your use of the cache can handle mutation entries
// remaining in the cache for up to ttl when mutations and deletes occur very closely in time.
func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache {
	return &mutationCache{
		backingCache:  backingCache,
		indexer:       indexer,
		mutationCache: utilcache.NewLRUExpireCache(100),
		comparator:    etcdObjectVersioner{},
		ttl:           ttl,
		includeAdds:   includeAdds,
	}
}

// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out and
// since you can't distinguish between "didn't observe create" and "was deleted after create",
// if the key is missing from the backing cache, we always return it as missing.
type mutationCache struct {
	lock          sync.Mutex
	backingCache  Store
	indexer       Indexer
	mutationCache *utilcache.LRUExpireCache
	includeAdds   bool
	ttl           time.Duration

	comparator ResourceVersionComparator
}

// GetByKey is never guaranteed to return back the value set in Mutation. It could be paged out, it could
// be older than another copy, the backingCache may be more recent or, you might have written twice into the same key.
// You get a value that was valid at some snapshot of time and will always return the newer of backingCache and mutationCache.
func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
	c.lock.Lock()
	defer c.lock.Unlock()

	obj, exists, err := c.backingCache.GetByKey(key)
	if err != nil {
		return nil, false, err
	}
	if !exists {
		if !c.includeAdds {
			// we can't distinguish between "didn't observe create" and "was deleted after create", so
			// if the key is missing, we always return it as missing
			return nil, false, nil
		}
		obj, exists = c.mutationCache.Get(key)
		if !exists {
			return nil, false, nil
		}
	}
	objRuntime, ok := obj.(runtime.Object)
	if !ok {
		return obj, true, nil
	}
	return c.newerObject(key, objRuntime), true, nil
}

// ByIndex returns the newer objects that match the provided index and indexer key.
// Will return an error if no indexer was provided.
func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.indexer == nil {
		return nil, fmt.Errorf("no indexer has been provided to the mutation cache")
	}
	keys, err := c.indexer.IndexKeys(name, indexKey)
	if err != nil {
		return nil, err
	}
	var items []interface{}
	keySet := sets.NewString()
	for _, key := range keys {
		keySet.Insert(key)
		obj, exists, err := c.indexer.GetByKey(key)
		if err != nil {
			return nil, err
		}
		if !exists {
			continue
		}
		if objRuntime, ok := obj.(runtime.Object); ok {
			items = append(items, c.newerObject(key, objRuntime))
		} else {
			items = append(items, obj)
		}
	}

	if c.includeAdds {
		fn := c.indexer.GetIndexers()[name]
		// Keys() is returned oldest to newest, so full traversal does not alter the LRU behavior
		for _, key := range c.mutationCache.Keys() {
			updated, ok := c.mutationCache.Get(key)
			if !ok {
				continue
			}
			if keySet.Has(key.(string)) {
				continue
			}
			elements, err := fn(updated)
			if err != nil {
				glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
				continue
			}
			for _, inIndex := range elements {
				if inIndex != indexKey {
					continue
				}
				items = append(items, updated)
				break
			}
		}
	}

	return items, nil
}

// newerObject checks the mutation cache for a newer object and returns one if found. If the
// mutated object is older than the backing object, it is removed from the mutation cache.
// Must be called while the lock is held.
func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object {
	mutatedObj, exists := c.mutationCache.Get(key)
	if !exists {
		return backing
	}
	mutatedObjRuntime, ok := mutatedObj.(runtime.Object)
	if !ok {
		return backing
	}
	if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 {
		c.mutationCache.Remove(key)
		return backing
	}
	return mutatedObjRuntime
}

// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache
// copy. If you call Mutation twice with the same object on different threads, one will win, but it's not defined
// which one. This doesn't affect correctness, since the GetByKey guarantee of "later of these two caches" is
// preserved, but you may not get the version of the object you want. The object you get is only guaranteed to be
// "one that was valid at some point in time", not "the one that I want".
func (c *mutationCache) Mutation(obj interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()

	key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		// this is a "nice to have", so failures shouldn't do anything weird
		utilruntime.HandleError(err)
		return
	}

	if objRuntime, ok := obj.(runtime.Object); ok {
		if mutatedObj, exists := c.mutationCache.Get(key); exists {
			if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok {
				if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 {
					return
				}
			}
		}
	}
	c.mutationCache.Add(key, obj, c.ttl)
}

// etcdObjectVersioner implements versioning and extracting etcd node information
// for objects that have an embedded ObjectMeta or ListMeta field.
type etcdObjectVersioner struct{}

// ObjectResourceVersion implements Versioner
func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return 0, err
	}
	version := accessor.GetResourceVersion()
	if len(version) == 0 {
		return 0, nil
	}
	return strconv.ParseUint(version, 10, 64)
}

// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings,
// but etcd resource versions are special, they're actually ints, so we can easily compare them.
func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int {
	lhsVersion, err := a.ObjectResourceVersion(lhs)
	if err != nil {
		// coder error
		panic(err)
	}
	rhsVersion, err := a.ObjectResourceVersion(rhs)
	if err != nil {
		// coder error
		panic(err)
	}

	if lhsVersion == rhsVersion {
		return 0
	}
	if lhsVersion < rhsVersion {
		return -1
	}

	return 1
}
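
// Example (editor's sketch, not part of the vendored file): read-your-writes
// over an informer store. The store, updated, and key arguments are assumed to
// be supplied by the caller; Mutation keys objects internally via
// DeletionHandlingMetaNamespaceKeyFunc as namespace/name.
func exampleMutationCache(store Store, updated runtime.Object, key string) (interface{}, bool, error) {
	mc := NewIntegerResourceVersionMutationCache(store, nil, time.Minute, false)
	// Record our own update so reads see it before the watch catches up.
	mc.Mutation(updated)
	// Returns the newer of the backing store's copy and the mutated copy.
	return mc.GetByKey(key)
}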
@ -0,0 +1,133 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"os"
	"reflect"
	"strconv"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/diff"
	"k8s.io/client-go/kubernetes/scheme"
)

var mutationDetectionEnabled = false

func init() {
	mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR"))
}

// CacheMutationDetector watches cached objects and fails (panics, or calls an
// injected failure handler) if any of them is mutated after being added.
type CacheMutationDetector interface {
	AddObject(obj interface{})
	Run(stopCh <-chan struct{})
}

// NewCacheMutationDetector creates a mutation detector; it is a no-op unless the
// KUBE_CACHE_MUTATION_DETECTOR environment variable is set to true.
func NewCacheMutationDetector(name string) CacheMutationDetector {
	if !mutationDetectionEnabled {
		return dummyMutationDetector{}
	}
	return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
}

type dummyMutationDetector struct{}

func (dummyMutationDetector) Run(stopCh <-chan struct{}) {
}
func (dummyMutationDetector) AddObject(obj interface{}) {
}

// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated.
// It has a list of cached objects and their copies. I haven't thought of a way
// to see WHO is mutating it, just that it's getting mutated.
type defaultCacheMutationDetector struct {
	name   string
	period time.Duration

	lock       sync.Mutex
	cachedObjs []cacheObj

	// failureFunc is injectable for unit testing. If you don't have it, the process will panic.
	// This panic is intentional, since turning on this detection indicates you want a strong
	// failure signal. This failure is effectively a p0 bug and you can't trust process results
	// after a mutation anyway.
	failureFunc func(message string)
}

// cacheObj holds the actual object and a copy
type cacheObj struct {
	cached interface{}
	copied interface{}
}

func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
	// we DON'T want protection from panics. If we're running this code, we want to die
	for {
		d.CompareObjects()

		select {
		case <-stopCh:
			return
		case <-time.After(d.period):
		}
	}
}

// AddObject makes a deep copy of the object for later comparison. It only works on runtime.Object
// but that covers the vast majority of our cached objects
func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
	if _, ok := obj.(DeletedFinalStateUnknown); ok {
		return
	}
	if _, ok := obj.(runtime.Object); !ok {
		return
	}

	copiedObj, err := scheme.Scheme.Copy(obj.(runtime.Object))
	if err != nil {
		return
	}

	d.lock.Lock()
	defer d.lock.Unlock()
	d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
}

// CompareObjects checks each tracked object against its stored copy and fails if any has been mutated.
func (d *defaultCacheMutationDetector) CompareObjects() {
	d.lock.Lock()
	defer d.lock.Unlock()

	altered := false
	for i, obj := range d.cachedObjs {
		if !reflect.DeepEqual(obj.cached, obj.copied) {
			fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied))
			altered = true
		}
	}

	if altered {
		msg := fmt.Sprintf("cache %s modified", d.name)
		if d.failureFunc != nil {
			d.failureFunc(msg)
			return
		}
		panic(msg)
	}
}
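
// Example (editor's sketch, not part of the vendored file): wiring the
// detector into a cache-filling loop. Detection is inert unless
// KUBE_CACHE_MUTATION_DETECTOR=true is set in the process environment.
func exampleMutationDetector(objs []interface{}, stopCh <-chan struct{}) {
	detector := NewCacheMutationDetector("example-informer")
	for _, obj := range objs {
		detector.AddObject(obj) // snapshots a deep copy for later comparison
	}
	go detector.Run(stopCh) // periodically fails if any cached object was mutated
}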
@ -0,0 +1,442 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
goruntime "runtime"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
|
||||
type Reflector struct {
|
||||
// name identifies this reflector. By default it will be a file:line if possible.
|
||||
name string
|
||||
// metrics tracks basic metric information about the reflector
|
||||
metrics *reflectorMetrics
|
||||
|
||||
// The type of object we expect to place in the store.
|
||||
expectedType reflect.Type
|
||||
// The destination to sync up with the watch source
|
||||
store Store
|
||||
// listerWatcher is used to perform lists and watches.
|
||||
listerWatcher ListerWatcher
|
||||
// period controls timing between one watch ending and
|
||||
// the beginning of the next one.
|
||||
period time.Duration
|
||||
resyncPeriod time.Duration
|
||||
ShouldResync func() bool
|
||||
// clock allows tests to manipulate time
|
||||
clock clock.Clock
|
||||
// lastSyncResourceVersion is the resource version token last
|
||||
// observed when doing a sync with the underlying store
|
||||
// it is thread safe, but not synchronized with the underlying store
|
||||
lastSyncResourceVersion string
|
||||
// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
|
||||
lastSyncResourceVersionMutex sync.RWMutex
|
||||
}
|
||||
|
||||
var (
|
||||
// We try to spread the load on apiserver by setting timeouts for
|
||||
// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
|
||||
// However, it can be modified to avoid periodic resync to break the
|
||||
// TCP connection.
|
||||
minWatchTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
|
||||
// The indexer is configured to key on namespace
|
||||
func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
|
||||
indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
|
||||
reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
|
||||
return indexer, reflector
|
||||
}
|
||||
|
||||
// NewReflector creates a new Reflector object which will keep the given store up to
|
||||
// date with the server's contents for the given resource. Reflector promises to
|
||||
// only put things in the store that have the type of expectedType, unless expectedType
|
||||
// is nil. If resyncPeriod is non-zero, then lists will be executed after every
|
||||
// resyncPeriod, so that you can use reflectors to periodically process everything as
|
||||
// well as incrementally processing the things that change.
|
||||
func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
|
||||
return NewNamedReflector(getDefaultReflectorName(internalPackages...), lw, expectedType, store, resyncPeriod)
|
||||
}
|
||||
|
||||
// reflectorDisambiguator is used to disambiguate started reflectors.
|
||||
// initialized to an unstable value to ensure meaning isn't attributed to the suffix.
|
||||
var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345)
|
||||
|
||||
// NewNamedReflector same as NewReflector, but with a specified name for logging
|
||||
func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
|
||||
	reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1)
	r := &Reflector{
		name: name,
		// we need this to be unique per process (some names are still the same) but obvious who it belongs to
		metrics:       newReflectorMetrics(makeValidPromethusMetricName(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
		listerWatcher: lw,
		store:         store,
		expectedType:  reflect.TypeOf(expectedType),
		period:        time.Second,
		resyncPeriod:  resyncPeriod,
		clock:         &clock.RealClock{},
	}
	return r
}

func makeValidPromethusMetricName(in string) string {
	// this isn't perfect, but it removes our common characters
	return strings.NewReplacer("/", "_", ".", "_", "-", "_").Replace(in)
}

// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
// call chains to NewReflector, so they'd be low-entropy names for reflectors.
var internalPackages = []string{"client-go/tools/cache/", "/runtime/asm_"}

// getDefaultReflectorName walks back through the call stack until it finds a caller from outside of the ignoredPackages.
// It returns a shortpath/filename:line to aid in identification of this reflector when it starts logging.
func getDefaultReflectorName(ignoredPackages ...string) string {
	name := "????"
	const maxStack = 10
	for i := 1; i < maxStack; i++ {
		_, file, line, ok := goruntime.Caller(i)
		if !ok {
			file, line, ok = extractStackCreator()
			if !ok {
				break
			}
			i += maxStack
		}
		if hasPackage(file, ignoredPackages) {
			continue
		}

		file = trimPackagePrefix(file)
		name = fmt.Sprintf("%s:%d", file, line)
		break
	}
	return name
}

// hasPackage returns true if the file is in one of the ignored packages.
func hasPackage(file string, ignoredPackages []string) bool {
	for _, ignoredPackage := range ignoredPackages {
		if strings.Contains(file, ignoredPackage) {
			return true
		}
	}
	return false
}

// trimPackagePrefix reduces duplicate values off the front of a package name.
func trimPackagePrefix(file string) string {
	if l := strings.LastIndex(file, "k8s.io/client-go/pkg/"); l >= 0 {
		return file[l+len("k8s.io/client-go/"):]
	}
	if l := strings.LastIndex(file, "/src/"); l >= 0 {
		return file[l+5:]
	}
	if l := strings.LastIndex(file, "/pkg/"); l >= 0 {
		return file[l+1:]
	}
	return file
}

var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`)

// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false
// if the creator cannot be located.
// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440
func extractStackCreator() (string, int, bool) {
	stack := debug.Stack()
	matches := stackCreator.FindStringSubmatch(string(stack))
	if matches == nil || len(matches) != 4 {
		return "", 0, false
	}
	line, err := strconv.Atoi(matches[3])
	if err != nil {
		return "", 0, false
	}
	return matches[2], line, true
}

// Run starts a watch and handles watch events. Will restart the watch if it is closed.
// Run will exit when stopCh is closed.
func (r *Reflector) Run(stopCh <-chan struct{}) {
	glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
	wait.Until(func() {
		if err := r.ListAndWatch(stopCh); err != nil {
			utilruntime.HandleError(err)
		}
	}, r.period, stopCh)
}
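
For orientation, here is a minimal sketch of how this Reflector is typically driven: a ListerWatcher supplies List/Watch, the Reflector mirrors results into a Store, and Run loops until the stop channel closes. The clientset, pod types, and import paths below are illustrative assumptions (they vary by client-go version), not part of this file.

// Hypothetical usage sketch; `client` is an assumed client-go clientset.
func runPodReflector(client kubernetes.Interface, namespace string, stopCh <-chan struct{}) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.CoreV1().Pods(namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Pods(namespace).Watch(options)
		},
	}
	// Mirror pods into the local store, resyncing every 30 seconds.
	r := cache.NewReflector(lw, &v1.Pod{}, store, 30*time.Second)
	go r.Run(stopCh)
	return store
}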

var (
	// nothing will ever be sent down this channel
	neverExitWatch <-chan time.Time = make(chan time.Time)

	// Used to indicate that watching stopped so that a resync could happen.
	errorResyncRequested = errors.New("resync channel fired")

	// Used to indicate that watching stopped because of a signal from the stop
	// channel passed in from a client of the reflector.
	errorStopRequested = errors.New("Stop requested")
)

// resyncChan returns a channel which will receive something when a resync is
// required, and a cleanup function.
func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
	if r.resyncPeriod == 0 {
		return neverExitWatch, func() bool { return false }
	}
	// The cleanup function is required: imagine the scenario where watches
	// always fail so we end up listing frequently. Then, if we don't
	// manually stop the timer, we could end up with many timers active
	// concurrently.
	t := r.clock.NewTimer(r.resyncPeriod)
	return t.C(), t.Stop
}

// ListAndWatch first lists all items and gets the resource version at the moment of call,
// and then uses the resource version to watch.
// It returns an error if ListAndWatch didn't even try to initialize the watch.
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
	glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
	var resourceVersion string

	// Explicitly set "0" as resource version - it's fine for the List()
	// to be served from cache and potentially be delayed relative to
	// etcd contents. Reflector framework will catch up via Watch() eventually.
	options := metav1.ListOptions{ResourceVersion: "0"}
	r.metrics.numberOfLists.Inc()
	start := r.clock.Now()
	list, err := r.listerWatcher.List(options)
	if err != nil {
		return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
	}
	r.metrics.listDuration.Observe(time.Since(start).Seconds())
	listMetaInterface, err := meta.ListAccessor(list)
	if err != nil {
		return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
	}
	resourceVersion = listMetaInterface.GetResourceVersion()
	items, err := meta.ExtractList(list)
	if err != nil {
		return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
	}
	r.metrics.numberOfItemsInList.Observe(float64(len(items)))
	if err := r.syncWith(items, resourceVersion); err != nil {
		return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
	}
	r.setLastSyncResourceVersion(resourceVersion)

	resyncerrc := make(chan error, 1)
	cancelCh := make(chan struct{})
	defer close(cancelCh)
	go func() {
		resyncCh, cleanup := r.resyncChan()
		defer func() {
			cleanup() // Call the last one written into cleanup
		}()
		for {
			select {
			case <-resyncCh:
			case <-stopCh:
				return
			case <-cancelCh:
				return
			}
			if r.ShouldResync == nil || r.ShouldResync() {
				glog.V(4).Infof("%s: forcing resync", r.name)
				if err := r.store.Resync(); err != nil {
					resyncerrc <- err
					return
				}
			}
			cleanup()
			resyncCh, cleanup = r.resyncChan()
		}
	}()

	for {
		timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
		options = metav1.ListOptions{
			ResourceVersion: resourceVersion,
			// We want to avoid situations of hanging watchers. Stop any watchers that do not
			// receive any events within the timeout window.
			TimeoutSeconds: &timeoutSeconds,
		}

		r.metrics.numberOfWatches.Inc()
		w, err := r.listerWatcher.Watch(options)
		if err != nil {
			switch err {
			case io.EOF:
				// watch closed normally
			case io.ErrUnexpectedEOF:
				glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
			default:
				utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
			}
			// If this is "connection refused" error, it means that most likely apiserver is not responsive.
			// It doesn't make sense to re-list all objects because most likely we will be able to restart
			// watch where we ended.
			// If that's the case wait and resend watch request.
			if urlError, ok := err.(*url.Error); ok {
				if opError, ok := urlError.Err.(*net.OpError); ok {
					if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
						time.Sleep(time.Second)
						continue
					}
				}
			}
			return nil
		}

		if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
			if err != errorStopRequested {
				glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
			}
			return nil
		}
	}
}

// syncWith replaces the store's items with the given list.
func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
	found := make([]interface{}, 0, len(items))
	for _, item := range items {
		found = append(found, item)
	}
	return r.store.Replace(found, resourceVersion)
}

// watchHandler watches w and keeps *resourceVersion up to date.
func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
	start := r.clock.Now()
	eventCount := 0

	// Stopping the watcher should be idempotent and if we return from this function there's no way
	// we're coming back in with the same watch interface.
	defer w.Stop()
	// update metrics
	defer func() {
		r.metrics.numberOfItemsInWatch.Observe(float64(eventCount))
		r.metrics.watchDuration.Observe(time.Since(start).Seconds())
	}()

loop:
	for {
		select {
		case <-stopCh:
			return errorStopRequested
		case err := <-errc:
			return err
		case event, ok := <-w.ResultChan():
			if !ok {
				break loop
			}
			if event.Type == watch.Error {
				return apierrs.FromObject(event.Object)
			}
			if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a {
				utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
				continue
			}
			meta, err := meta.Accessor(event.Object)
			if err != nil {
				utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
				continue
			}
			newResourceVersion := meta.GetResourceVersion()
			switch event.Type {
			case watch.Added:
				err := r.store.Add(event.Object)
				if err != nil {
					utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err))
				}
			case watch.Modified:
				err := r.store.Update(event.Object)
				if err != nil {
					utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err))
				}
			case watch.Deleted:
				// TODO: Will any consumers need access to the "last known
				// state", which is passed in event.Object? If so, may need
				// to change this.
				err := r.store.Delete(event.Object)
				if err != nil {
					utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
				}
			default:
				utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
			}
			*resourceVersion = newResourceVersion
			r.setLastSyncResourceVersion(newResourceVersion)
			eventCount++
		}
	}

	watchDuration := r.clock.Now().Sub(start)
	if watchDuration < 1*time.Second && eventCount == 0 {
		r.metrics.numberOfShortWatches.Inc()
		return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
	}
	glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
	return nil
}

// LastSyncResourceVersion is the resource version observed when last synced with the underlying store.
// The value returned is not synchronized with access to the underlying store and is not thread-safe.
func (r *Reflector) LastSyncResourceVersion() string {
	r.lastSyncResourceVersionMutex.RLock()
	defer r.lastSyncResourceVersionMutex.RUnlock()
	return r.lastSyncResourceVersion
}

func (r *Reflector) setLastSyncResourceVersion(v string) {
	r.lastSyncResourceVersionMutex.Lock()
	defer r.lastSyncResourceVersionMutex.Unlock()
	r.lastSyncResourceVersion = v

	rv, err := strconv.Atoi(v)
	if err == nil {
		r.metrics.lastResourceVersion.Set(float64(rv))
	}
}

@@ -0,0 +1,119 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.

package cache

import (
	"sync"
)

// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
	Set(float64)
}

// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
	Inc()
}

// SummaryMetric captures individual observations.
type SummaryMetric interface {
	Observe(float64)
}

type noopMetric struct{}

func (noopMetric) Inc()            {}
func (noopMetric) Dec()            {}
func (noopMetric) Observe(float64) {}
func (noopMetric) Set(float64)     {}

type reflectorMetrics struct {
	numberOfLists       CounterMetric
	listDuration        SummaryMetric
	numberOfItemsInList SummaryMetric

	numberOfWatches      CounterMetric
	numberOfShortWatches CounterMetric
	watchDuration        SummaryMetric
	numberOfItemsInWatch SummaryMetric

	lastResourceVersion GaugeMetric
}

// MetricsProvider generates various metrics used by the reflector.
type MetricsProvider interface {
	NewListsMetric(name string) CounterMetric
	NewListDurationMetric(name string) SummaryMetric
	NewItemsInListMetric(name string) SummaryMetric

	NewWatchesMetric(name string) CounterMetric
	NewShortWatchesMetric(name string) CounterMetric
	NewWatchDurationMetric(name string) SummaryMetric
	NewItemsInWatchMetric(name string) SummaryMetric

	NewLastResourceVersionMetric(name string) GaugeMetric
}

type noopMetricsProvider struct{}

func (noopMetricsProvider) NewListsMetric(name string) CounterMetric         { return noopMetric{} }
func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric  { return noopMetric{} }
func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric   { return noopMetric{} }
func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric       { return noopMetric{} }
func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric  { return noopMetric{} }
func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} }
func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric  { return noopMetric{} }
func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric {
	return noopMetric{}
}

var metricsFactory = struct {
	metricsProvider MetricsProvider
	setProviders    sync.Once
}{
	metricsProvider: noopMetricsProvider{},
}

func newReflectorMetrics(name string) *reflectorMetrics {
	var ret *reflectorMetrics
	if len(name) == 0 {
		return ret
	}
	return &reflectorMetrics{
		numberOfLists:        metricsFactory.metricsProvider.NewListsMetric(name),
		listDuration:         metricsFactory.metricsProvider.NewListDurationMetric(name),
		numberOfItemsInList:  metricsFactory.metricsProvider.NewItemsInListMetric(name),
		numberOfWatches:      metricsFactory.metricsProvider.NewWatchesMetric(name),
		numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
		watchDuration:        metricsFactory.metricsProvider.NewWatchDurationMetric(name),
		numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
		lastResourceVersion:  metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
	}
}

// SetReflectorMetricsProvider sets the metrics provider
func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
	metricsFactory.setProviders.Do(func() {
		metricsFactory.metricsProvider = metricsProvider
	})
}
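
A sketch of how a consumer might plug in a custom provider through SetReflectorMetricsProvider. The in-memory types below are illustrative assumptions (a real setup would usually adapt Prometheus collectors, and would import sync/atomic); note that the provider must be registered before any Reflector is constructed, because newReflectorMetrics reads the factory at construction time and setProviders is a sync.Once.

// Hypothetical in-memory provider; all names below are illustrative.
type countingMetric struct{ n int64 }

func (m *countingMetric) Inc()            { atomic.AddInt64(&m.n, 1) }
func (m *countingMetric) Observe(float64) { atomic.AddInt64(&m.n, 1) } // counts observations only
func (m *countingMetric) Set(v float64)   { atomic.StoreInt64(&m.n, int64(v)) }

type inMemoryProvider struct{}

func (inMemoryProvider) NewListsMetric(name string) CounterMetric             { return &countingMetric{} }
func (inMemoryProvider) NewListDurationMetric(name string) SummaryMetric      { return &countingMetric{} }
func (inMemoryProvider) NewItemsInListMetric(name string) SummaryMetric       { return &countingMetric{} }
func (inMemoryProvider) NewWatchesMetric(name string) CounterMetric           { return &countingMetric{} }
func (inMemoryProvider) NewShortWatchesMetric(name string) CounterMetric      { return &countingMetric{} }
func (inMemoryProvider) NewWatchDurationMetric(name string) SummaryMetric     { return &countingMetric{} }
func (inMemoryProvider) NewItemsInWatchMetric(name string) SummaryMetric      { return &countingMetric{} }
func (inMemoryProvider) NewLastResourceVersionMetric(name string) GaugeMetric { return &countingMetric{} }

// Register once, early in process startup:
//	cache.SetReflectorMetricsProvider(inMemoryProvider{})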

@@ -0,0 +1,579 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/clock"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/golang/glog"
)

// SharedInformer has a shared data cache and is capable of distributing notifications for changes
// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
// one behavior change compared to a standard Informer. When you receive a notification, the cache
// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend
// on the contents of the cache exactly matching the notification you've received in handler
// functions. If there was a create, followed by a delete, the cache may NOT have your item. This
// has advantages over the broadcaster since it allows us to share a common cache across many
// controllers. Extending the broadcaster would have required us to keep duplicate caches for each
// watch.
type SharedInformer interface {
	// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
	// period. Events to a single handler are delivered sequentially, but there is no coordination
	// between different handlers.
	AddEventHandler(handler ResourceEventHandler)
	// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
	// specified resync period. Events to a single handler are delivered sequentially, but there is
	// no coordination between different handlers.
	AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
	// GetStore returns the Store.
	GetStore() Store
	// GetController gives back a synthetic interface that "votes" to start the informer
	GetController() Controller
	// Run starts the shared informer, which will be stopped when stopCh is closed.
	Run(stopCh <-chan struct{})
	// HasSynced returns true if the shared informer's store has synced.
	HasSynced() bool
	// LastSyncResourceVersion is the resource version observed when last synced with the underlying
	// store. The value returned is not synchronized with access to the underlying store and is not
	// thread-safe.
	LastSyncResourceVersion() string
}

type SharedIndexInformer interface {
	SharedInformer
	// AddIndexers add indexers to the informer before it starts.
	AddIndexers(indexers Indexers) error
	GetIndexer() Indexer
}

// NewSharedInformer creates a new instance for the listwatcher.
func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
	return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
}

// NewSharedIndexInformer creates a new instance for the listwatcher.
func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
	realClock := &clock.RealClock{}
	sharedIndexInformer := &sharedIndexInformer{
		processor:                       &sharedProcessor{clock: realClock},
		indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
		listerWatcher:                   lw,
		objectType:                      objType,
		resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
		defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
		clock:                           realClock,
	}
	return sharedIndexInformer
}
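
A minimal usage sketch: one shared informer serves many handlers over a single underlying list/watch. The ListerWatcher `lw` and pod type are assumed, as in the reflector sketch earlier.

// Hypothetical usage sketch, continuing the earlier assumptions.
informer := cache.NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second, cache.Indexers{})
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc:    func(obj interface{}) { /* handle add */ },
	UpdateFunc: func(oldObj, newObj interface{}) { /* handle update */ },
	DeleteFunc: func(obj interface{}) { /* handle delete */ },
})
stopCh := make(chan struct{})
defer close(stopCh)
go informer.Run(stopCh)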

// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
type InformerSynced func() bool

// syncedPollPeriod controls how often you look at the status of your sync funcs
const syncedPollPeriod = 100 * time.Millisecond

// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
// if the controller should shut down.
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
	err := wait.PollUntil(syncedPollPeriod,
		func() (bool, error) {
			for _, syncFunc := range cacheSyncs {
				if !syncFunc() {
					return false, nil
				}
			}
			return true, nil
		},
		stopCh)
	if err != nil {
		glog.V(2).Infof("stop requested")
		return false
	}

	glog.V(4).Infof("caches populated")
	return true
}
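
Continuing the sketch above: since Run returns immediately in a goroutine, callers typically gate on WaitForCacheSync before reading the store.

// Hypothetical continuation of the informer sketch.
if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
	return // stop was requested before the initial list completed
}
// The store now reflects at least the initial list.
for _, obj := range informer.GetStore().List() {
	_ = obj // safe to read, as long as items are treated as immutable
}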

type sharedIndexInformer struct {
	indexer    Indexer
	controller Controller

	processor             *sharedProcessor
	cacheMutationDetector CacheMutationDetector

	// This block is tracked to handle late initialization of the controller
	listerWatcher ListerWatcher
	objectType    runtime.Object

	// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
	// shouldResync to check if any of our listeners need a resync.
	resyncCheckPeriod time.Duration
	// defaultEventHandlerResyncPeriod is the default resync period for any handlers added via
	// AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default
	// value).
	defaultEventHandlerResyncPeriod time.Duration
	// clock allows for testability
	clock clock.Clock

	started, stopped bool
	startedLock      sync.Mutex

	// blockDeltas gives a way to stop all event distribution so that a late event handler
	// can safely join the shared informer.
	blockDeltas sync.Mutex
}

// dummyController hides the fact that a SharedInformer is different from a dedicated one
// where a caller can `Run`. The run method is disconnected in this case, because higher
// level logic will decide when to start the SharedInformer and related controller.
// Because returning information back is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
type dummyController struct {
	informer *sharedIndexInformer
}

func (v *dummyController) Run(stopCh <-chan struct{}) {
}

func (v *dummyController) HasSynced() bool {
	return v.informer.HasSynced()
}

func (c *dummyController) LastSyncResourceVersion() string {
	return ""
}

type updateNotification struct {
	oldObj interface{}
	newObj interface{}
}

type addNotification struct {
	newObj interface{}
}

type deleteNotification struct {
	oldObj interface{}
}

func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    s.listerWatcher,
		ObjectType:       s.objectType,
		FullResyncPeriod: s.resyncCheckPeriod,
		RetryOnError:     false,
		ShouldResync:     s.processor.shouldResync,

		Process: s.HandleDeltas,
	}

	func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()

		s.controller = New(cfg)
		s.controller.(*controller).clock = s.clock
		s.started = true
	}()

	// Separate stop channel because Processor should be stopped strictly after controller
	processorStopCh := make(chan struct{})
	var wg wait.Group
	defer wg.Wait()              // Wait for Processor to stop
	defer close(processorStopCh) // Tell Processor to stop
	wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run)
	wg.StartWithChannel(processorStopCh, s.processor.run)

	defer func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()
		s.stopped = true // Don't want any new listeners
	}()
	s.controller.Run(stopCh)
}

func (s *sharedIndexInformer) HasSynced() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.controller == nil {
		return false
	}
	return s.controller.HasSynced()
}

func (s *sharedIndexInformer) LastSyncResourceVersion() string {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.controller == nil {
		return ""
	}
	return s.controller.LastSyncResourceVersion()
}

func (s *sharedIndexInformer) GetStore() Store {
	return s.indexer
}

func (s *sharedIndexInformer) GetIndexer() Indexer {
	return s.indexer
}

func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.started {
		return fmt.Errorf("informer has already started")
	}

	return s.indexer.AddIndexers(indexers)
}

func (s *sharedIndexInformer) GetController() Controller {
	return &dummyController{informer: s}
}

func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) {
	s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
}

func determineResyncPeriod(desired, check time.Duration) time.Duration {
	if desired == 0 {
		return desired
	}
	if check == 0 {
		glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
		return 0
	}
	if desired < check {
		glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
		return check
	}
	return desired
}
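
The resolution rules above are easiest to read as a table of cases; a hypothetical test in this package would pin them down:

func TestDetermineResyncPeriod(t *testing.T) {
	cases := []struct{ desired, check, want time.Duration }{
		{0, 30 * time.Second, 0},                               // 0 means the handler opts out of resyncs
		{10 * time.Second, 0, 0},                               // informer doesn't support resyncing at all
		{10 * time.Second, 30 * time.Second, 30 * time.Second}, // raised to the informer's check period
		{60 * time.Second, 30 * time.Second, 60 * time.Second}, // kept as requested
	}
	for _, c := range cases {
		if got := determineResyncPeriod(c.desired, c.check); got != c.want {
			t.Errorf("determineResyncPeriod(%v, %v) = %v, want %v", c.desired, c.check, got, c.want)
		}
	}
}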

const minimumResyncPeriod = 1 * time.Second

func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.stopped {
		glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
		return
	}

	if resyncPeriod > 0 {
		if resyncPeriod < minimumResyncPeriod {
			glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
			resyncPeriod = minimumResyncPeriod
		}

		if resyncPeriod < s.resyncCheckPeriod {
			if s.started {
				glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
				resyncPeriod = s.resyncCheckPeriod
			} else {
				// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
				// resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners
				// accordingly
				s.resyncCheckPeriod = resyncPeriod
				s.processor.resyncCheckPeriodChanged(resyncPeriod)
			}
		}
	}

	listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now())

	if !s.started {
		s.processor.addListener(listener)
		return
	}

	// in order to safely join, we have to
	// 1. stop sending add/update/delete notifications
	// 2. do a list against the store
	// 3. send synthetic "Add" events to the new handler
	// 4. unblock
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	s.processor.addAndStartListener(listener)
	for _, item := range s.indexer.List() {
		listener.add(addNotification{newObj: item})
	}
}

func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	// from oldest to newest
	for _, d := range obj.(Deltas) {
		switch d.Type {
		case Sync, Added, Updated:
			isSync := d.Type == Sync
			s.cacheMutationDetector.AddObject(d.Object)
			if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
				if err := s.indexer.Update(d.Object); err != nil {
					return err
				}
				s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
			} else {
				if err := s.indexer.Add(d.Object); err != nil {
					return err
				}
				s.processor.distribute(addNotification{newObj: d.Object}, isSync)
			}
		case Deleted:
			if err := s.indexer.Delete(d.Object); err != nil {
				return err
			}
			s.processor.distribute(deleteNotification{oldObj: d.Object}, false)
		}
	}
	return nil
}

type sharedProcessor struct {
	listenersLock    sync.RWMutex
	listeners        []*processorListener
	syncingListeners []*processorListener
	clock            clock.Clock
	wg               wait.Group
}

func (p *sharedProcessor) addAndStartListener(listener *processorListener) {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()

	p.addListenerLocked(listener)
	p.wg.Start(listener.run)
	p.wg.Start(listener.pop)
}

func (p *sharedProcessor) addListener(listener *processorListener) {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()

	p.addListenerLocked(listener)
}

func (p *sharedProcessor) addListenerLocked(listener *processorListener) {
	p.listeners = append(p.listeners, listener)
	p.syncingListeners = append(p.syncingListeners, listener)
}

func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()

	if sync {
		for _, listener := range p.syncingListeners {
			listener.add(obj)
		}
	} else {
		for _, listener := range p.listeners {
			listener.add(obj)
		}
	}
}

func (p *sharedProcessor) run(stopCh <-chan struct{}) {
	func() {
		p.listenersLock.RLock()
		defer p.listenersLock.RUnlock()
		for _, listener := range p.listeners {
			p.wg.Start(listener.run)
			p.wg.Start(listener.pop)
		}
	}()
	<-stopCh
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()
	for _, listener := range p.listeners {
		close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
	}
	p.wg.Wait() // Wait for all .pop() and .run() to stop
}

// shouldResync queries every listener to determine if any of them need a resync, based on each
// listener's resyncPeriod.
func (p *sharedProcessor) shouldResync() bool {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()

	p.syncingListeners = []*processorListener{}

	resyncNeeded := false
	now := p.clock.Now()
	for _, listener := range p.listeners {
		// need to loop through all the listeners to see if they need to resync so we can prepare any
		// listeners that are going to be resyncing.
		if listener.shouldResync(now) {
			resyncNeeded = true
			p.syncingListeners = append(p.syncingListeners, listener)
			listener.determineNextResync(now)
		}
	}
	return resyncNeeded
}

func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()

	for _, listener := range p.listeners {
		resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod)
		listener.setResyncPeriod(resyncPeriod)
	}
}

type processorListener struct {
	nextCh chan interface{}
	addCh  chan interface{}

	handler ResourceEventHandler

	// requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer
	requestedResyncPeriod time.Duration
	// resyncPeriod is how frequently the listener wants a full resync from the shared informer. This
	// value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the
	// informer's overall resync check period.
	resyncPeriod time.Duration
	// nextResync is the earliest time the listener should get a full resync
	nextResync time.Time
	// resyncLock guards access to resyncPeriod and nextResync
	resyncLock sync.Mutex
}

func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time) *processorListener {
	ret := &processorListener{
		nextCh:                make(chan interface{}),
		addCh:                 make(chan interface{}),
		handler:               handler,
		requestedResyncPeriod: requestedResyncPeriod,
		resyncPeriod:          resyncPeriod,
	}

	ret.determineNextResync(now)

	return ret
}

func (p *processorListener) add(notification interface{}) {
	p.addCh <- notification
}

func (p *processorListener) pop() {
	defer utilruntime.HandleCrash()
	defer close(p.nextCh) // Tell .run() to stop

	// pendingNotifications is an unbounded slice that holds all notifications not yet distributed
	// there is one per listener, but a failing/stalled listener will have infinite pendingNotifications
	// added until we OOM.
	// TODO This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
	// we should try to do something better
	var pendingNotifications []interface{}
	var nextCh chan<- interface{}
	var notification interface{}
	for {
		select {
		case nextCh <- notification:
			// Notification dispatched
			if len(pendingNotifications) == 0 { // Nothing to pop
				nextCh = nil // Disable this select case
				notification = nil
			} else {
				notification = pendingNotifications[0]
				pendingNotifications[0] = nil
				pendingNotifications = pendingNotifications[1:]
			}
		case notificationToAdd, ok := <-p.addCh:
			if !ok {
				return
			}
			if notification == nil { // No notification to pop (and pendingNotifications is empty)
				// Optimize the case - skip adding to pendingNotifications
				notification = notificationToAdd
				nextCh = p.nextCh
			} else { // There is already a notification waiting to be dispatched
				pendingNotifications = append(pendingNotifications, notificationToAdd)
			}
		}
	}
}
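
The pop loop above is a general Go pattern: an unbounded buffer between a producer channel and a consumer channel, using a nil channel to disable a select case. Here is a stripped-down, self-contained sketch of the same pattern (names are illustrative; unlike pop, this version flushes buffered items on close so the demo output is complete):

package main

import "fmt"

// unboundedBuffer forwards everything received on in to out, buffering
// without limit so the sender is never blocked by a slow receiver.
func unboundedBuffer(in <-chan int, out chan<- int) {
	defer close(out)
	var pending []int
	var send chan<- int // nil disables the send case until we hold a value
	var next int
	for {
		select {
		case send <- next:
			if len(pending) == 0 {
				send = nil // nothing left; disable the send case
			} else {
				next = pending[0]
				pending = pending[1:]
			}
		case v, ok := <-in:
			if !ok {
				// producer closed: flush anything still buffered, then stop
				if send != nil {
					out <- next
				}
				for _, rest := range pending {
					out <- rest
				}
				return
			}
			if send == nil {
				next = v
				send = out
			} else {
				pending = append(pending, v)
			}
		}
	}
}

func main() {
	in, out := make(chan int), make(chan int)
	go unboundedBuffer(in, out)
	go func() {
		for i := 0; i < 5; i++ {
			in <- i
		}
		close(in)
	}()
	for v := range out {
		fmt.Println(v) // prints 0 through 4 in order
	}
}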

func (p *processorListener) run() {
	defer utilruntime.HandleCrash()

	for next := range p.nextCh {
		switch notification := next.(type) {
		case updateNotification:
			p.handler.OnUpdate(notification.oldObj, notification.newObj)
		case addNotification:
			p.handler.OnAdd(notification.newObj)
		case deleteNotification:
			p.handler.OnDelete(notification.oldObj)
		default:
			utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
		}
	}
}

// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
// this always returns false.
func (p *processorListener) shouldResync(now time.Time) bool {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()

	if p.resyncPeriod == 0 {
		return false
	}

	return now.After(p.nextResync) || now.Equal(p.nextResync)
}

func (p *processorListener) determineNextResync(now time.Time) {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()

	p.nextResync = now.Add(p.resyncPeriod)
}

func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()

	p.resyncPeriod = resyncPeriod
}

@@ -0,0 +1,244 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/api/meta"
)

// Store is a generic object storage interface. Reflector knows how to watch a server
// and update a store. A generic store is provided, which allows Reflector to be used
// as a local caching system, and an LRU store, which allows Reflector to work like a
// queue of items yet to be processed.
//
// Store makes no assumptions about stored object identity; it is the responsibility
// of a Store implementation to provide a mechanism to correctly key objects and to
// define the contract for obtaining objects by some arbitrary key type.
type Store interface {
	Add(obj interface{}) error
	Update(obj interface{}) error
	Delete(obj interface{}) error
	List() []interface{}
	ListKeys() []string
	Get(obj interface{}) (item interface{}, exists bool, err error)
	GetByKey(key string) (item interface{}, exists bool, err error)

	// Replace will delete the contents of the store, using instead the
	// given list. Store takes ownership of the list, you should not reference
	// it after calling this function.
	Replace([]interface{}, string) error
	Resync() error
}

// KeyFunc knows how to make a key from an object. Implementations should be deterministic.
type KeyFunc func(obj interface{}) (string, error)

// KeyError will be returned any time a KeyFunc gives an error; it includes the object
// at fault.
type KeyError struct {
	Obj interface{}
	Err error
}

// Error gives a human-readable description of the error.
func (k KeyError) Error() string {
	return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
}

// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
// the object but not the object itself.
type ExplicitKey string

// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make
// keys for API objects which implement meta.Interface.
// The key uses the format <namespace>/<name> unless <namespace> is empty, then
// it's just <name>.
//
// TODO: replace key-as-string with a key-as-struct so that this
// packing/unpacking won't be necessary.
func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
	if key, ok := obj.(ExplicitKey); ok {
		return string(key), nil
	}
	meta, err := meta.Accessor(obj)
	if err != nil {
		return "", fmt.Errorf("object has no meta: %v", err)
	}
	if len(meta.GetNamespace()) > 0 {
		return meta.GetNamespace() + "/" + meta.GetName(), nil
	}
	return meta.GetName(), nil
}

// SplitMetaNamespaceKey returns the namespace and name that
// MetaNamespaceKeyFunc encoded into key.
//
// TODO: replace key-as-string with a key-as-struct so that this
// packing/unpacking won't be necessary.
func SplitMetaNamespaceKey(key string) (namespace, name string, err error) {
	parts := strings.Split(key, "/")
	switch len(parts) {
	case 1:
		// name only, no namespace
		return "", parts[0], nil
	case 2:
		// namespace and name
		return parts[0], parts[1], nil
	}

	return "", "", fmt.Errorf("unexpected key format: %q", key)
}
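
The two functions are inverses for well-formed objects; a short sketch (the pod value is assumed):

// Hypothetical round-trip; `pod` is an assumed API object in namespace "default".
key, err := MetaNamespaceKeyFunc(pod) // e.g. "default/my-pod"
if err != nil {
	// the object had no accessible ObjectMeta
}
ns, name, _ := SplitMetaNamespaceKey(key) // ns == "default", name == "my-pod"
_, _ = ns, name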

// cache responsibilities are limited to:
//	1. Computing keys for objects via keyFunc
//	2. Invoking methods of a ThreadSafeStorage interface
type cache struct {
	// cacheStorage bears the burden of thread safety for the cache
	cacheStorage ThreadSafeStore
	// keyFunc is used to make the key for objects stored in and retrieved from items, and
	// should be deterministic.
	keyFunc KeyFunc
}

var _ Store = &cache{}

// Add inserts an item into the cache.
func (c *cache) Add(obj interface{}) error {
	key, err := c.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	c.cacheStorage.Add(key, obj)
	return nil
}

// Update sets an item in the cache to its updated state.
func (c *cache) Update(obj interface{}) error {
	key, err := c.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	c.cacheStorage.Update(key, obj)
	return nil
}

// Delete removes an item from the cache.
func (c *cache) Delete(obj interface{}) error {
	key, err := c.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	c.cacheStorage.Delete(key)
	return nil
}

// List returns a list of all the items.
// List is completely threadsafe as long as you treat all items as immutable.
func (c *cache) List() []interface{} {
	return c.cacheStorage.List()
}

// ListKeys returns a list of all the keys of the objects currently
// in the cache.
func (c *cache) ListKeys() []string {
	return c.cacheStorage.ListKeys()
}

// GetIndexers returns the indexers of cache
func (c *cache) GetIndexers() Indexers {
	return c.cacheStorage.GetIndexers()
}

// Index returns a list of items that match on the index function
// Index is thread-safe so long as you treat all items as immutable
func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) {
	return c.cacheStorage.Index(indexName, obj)
}

func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) {
	return c.cacheStorage.IndexKeys(indexName, indexKey)
}

// ListIndexFuncValues returns the list of generated values of an Index func
func (c *cache) ListIndexFuncValues(indexName string) []string {
	return c.cacheStorage.ListIndexFuncValues(indexName)
}

func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) {
	return c.cacheStorage.ByIndex(indexName, indexKey)
}

func (c *cache) AddIndexers(newIndexers Indexers) error {
	return c.cacheStorage.AddIndexers(newIndexers)
}

// Get returns the requested item, or sets exists=false.
// Get is completely threadsafe as long as you treat all items as immutable.
func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) {
	key, err := c.keyFunc(obj)
	if err != nil {
		return nil, false, KeyError{obj, err}
	}
	return c.GetByKey(key)
}

// GetByKey returns the requested item, or exists=false.
// GetByKey is completely threadsafe as long as you treat all items as immutable.
func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) {
	item, exists = c.cacheStorage.Get(key)
	return item, exists, nil
}

// Replace will delete the contents of 'c', using instead the given list.
// 'c' takes ownership of the list, you should not reference the list again
// after calling this function.
func (c *cache) Replace(list []interface{}, resourceVersion string) error {
	items := map[string]interface{}{}
	for _, item := range list {
		key, err := c.keyFunc(item)
		if err != nil {
			return KeyError{item, err}
		}
		items[key] = item
	}
	c.cacheStorage.Replace(items, resourceVersion)
	return nil
}

// Resync touches all items in the store to force processing
func (c *cache) Resync() error {
	return c.cacheStorage.Resync()
}

// NewStore returns a Store implemented simply with a map and a lock.
func NewStore(keyFunc KeyFunc) Store {
	return &cache{
		cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
		keyFunc:      keyFunc,
	}
}

// NewIndexer returns an Indexer implemented simply with a map and a lock.
func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer {
	return &cache{
		cacheStorage: NewThreadSafeStore(indexers, Indices{}),
		keyFunc:      keyFunc,
	}
}
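
A small sketch of using the Store directly (the pod value is assumed; keys follow MetaNamespaceKeyFunc):

// Hypothetical standalone usage of the map-backed Store.
store := NewStore(MetaNamespaceKeyFunc)
if err := store.Add(pod); err != nil { // stored under e.g. "default/my-pod"
	// the key function rejected the object
}
if item, exists, _ := store.GetByKey("default/my-pod"); exists {
	_ = item // treat as read-only
}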

@@ -0,0 +1,306 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"sync"

	"k8s.io/apimachinery/pkg/util/sets"
)

// ThreadSafeStore is an interface that allows concurrent access to a storage backend.
// TL;DR caveats: you must not modify anything returned by Get or List as it will break
// the indexing feature in addition to not being thread safe.
//
// The guarantees of thread safety provided by List/Get are only valid if the caller
// treats returned items as read-only. For example, a pointer inserted in the store
// through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get`
// on the same key and modify the pointer in a non-thread-safe way. Also note that
// modifying objects stored by the indexers (if any) will *not* automatically lead
// to a re-index. So it's not a good idea to directly modify the objects returned by
// Get/List, in general.
type ThreadSafeStore interface {
	Add(key string, obj interface{})
	Update(key string, obj interface{})
	Delete(key string)
	Get(key string) (item interface{}, exists bool)
	List() []interface{}
	ListKeys() []string
	Replace(map[string]interface{}, string)
	Index(indexName string, obj interface{}) ([]interface{}, error)
	IndexKeys(indexName, indexKey string) ([]string, error)
	ListIndexFuncValues(name string) []string
	ByIndex(indexName, indexKey string) ([]interface{}, error)
	GetIndexers() Indexers

	// AddIndexers adds more indexers to this store. If you call this after you already have data
	// in the store, the results are undefined.
	AddIndexers(newIndexers Indexers) error
	Resync() error
}

// threadSafeMap implements ThreadSafeStore
type threadSafeMap struct {
	lock  sync.RWMutex
	items map[string]interface{}

	// indexers maps a name to an IndexFunc
	indexers Indexers
	// indices maps a name to an Index
	indices Indices
}

func (c *threadSafeMap) Add(key string, obj interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	oldObject := c.items[key]
	c.items[key] = obj
	c.updateIndices(oldObject, obj, key)
}

func (c *threadSafeMap) Update(key string, obj interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	oldObject := c.items[key]
	c.items[key] = obj
	c.updateIndices(oldObject, obj, key)
}

func (c *threadSafeMap) Delete(key string) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if obj, exists := c.items[key]; exists {
		c.deleteFromIndices(obj, key)
		delete(c.items, key)
	}
}

func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	item, exists = c.items[key]
	return item, exists
}

func (c *threadSafeMap) List() []interface{} {
	c.lock.RLock()
	defer c.lock.RUnlock()
	list := make([]interface{}, 0, len(c.items))
	for _, item := range c.items {
		list = append(list, item)
	}
	return list
}

// ListKeys returns a list of all the keys of the objects currently
// in the threadSafeMap.
func (c *threadSafeMap) ListKeys() []string {
	c.lock.RLock()
	defer c.lock.RUnlock()
	list := make([]string, 0, len(c.items))
	for key := range c.items {
		list = append(list, key)
	}
	return list
}

func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.items = items

	// rebuild any index
	c.indices = Indices{}
	for key, item := range c.items {
		c.updateIndices(nil, item, key)
	}
}

// Index returns a list of items that match on the index function
// Index is thread-safe so long as you treat all items as immutable
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	indexFunc := c.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
	}

	indexKeys, err := indexFunc(obj)
	if err != nil {
		return nil, err
	}
	index := c.indices[indexName]

	// need to de-dupe the return list. Since multiple keys are allowed, this can happen.
	returnKeySet := sets.String{}
	for _, indexKey := range indexKeys {
		set := index[indexKey]
		for _, key := range set.UnsortedList() {
			returnKeySet.Insert(key)
		}
	}

	list := make([]interface{}, 0, returnKeySet.Len())
	for absoluteKey := range returnKeySet {
		list = append(list, c.items[absoluteKey])
	}
	return list, nil
}

// ByIndex returns a list of items that match an exact value on the index function
func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	indexFunc := c.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
	}

	index := c.indices[indexName]

	set := index[indexKey]
	list := make([]interface{}, 0, set.Len())
	for _, key := range set.List() {
		list = append(list, c.items[key])
	}

	return list, nil
}

// IndexKeys returns a list of keys that match on the index function.
// IndexKeys is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	indexFunc := c.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
	}

	index := c.indices[indexName]

	set := index[indexKey]
	return set.List(), nil
}

func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
	c.lock.RLock()
	defer c.lock.RUnlock()

	index := c.indices[indexName]
	names := make([]string, 0, len(index))
	for key := range index {
		names = append(names, key)
	}
	return names
}

func (c *threadSafeMap) GetIndexers() Indexers {
	return c.indexers
}

func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	if len(c.items) > 0 {
		return fmt.Errorf("cannot add indexers to running index")
	}

	oldKeys := sets.StringKeySet(c.indexers)
	newKeys := sets.StringKeySet(newIndexers)

	if oldKeys.HasAny(newKeys.List()...) {
		return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
	}

	for k, v := range newIndexers {
		c.indexers[k] = v
	}
	return nil
}

// updateIndices modifies the object's location in the managed indexes; if this is an update, you must provide an oldObj.
// updateIndices must be called from a function that already has a lock on the cache.
func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) error {
	// if we got an old object, we need to remove it before we add it again
	if oldObj != nil {
		c.deleteFromIndices(oldObj, key)
	}
	for name, indexFunc := range c.indexers {
		indexValues, err := indexFunc(newObj)
		if err != nil {
			return err
		}
		index := c.indices[name]
		if index == nil {
			index = Index{}
			c.indices[name] = index
		}

		for _, indexValue := range indexValues {
			set := index[indexValue]
			if set == nil {
				set = sets.String{}
				index[indexValue] = set
			}
			set.Insert(key)
		}
	}
	return nil
}

// deleteFromIndices removes the object from each of the managed indexes;
// it is intended to be called from a function that already has a lock on the cache.
func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error {
	for name, indexFunc := range c.indexers {
		indexValues, err := indexFunc(obj)
		if err != nil {
			return err
		}

		index := c.indices[name]
		if index == nil {
			continue
		}
		for _, indexValue := range indexValues {
			set := index[indexValue]
			if set != nil {
				set.Delete(key)
			}
		}
	}
	return nil
}

func (c *threadSafeMap) Resync() error {
	// Nothing to do
	return nil
}

func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
	return &threadSafeMap{
		items:    map[string]interface{}{},
		indexers: indexers,
		indices:  indices,
	}
}
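
A sketch of the index machinery in isolation: a hypothetical "byNamespace" indexer maps each object to its namespace, and ByIndex retrieves everything filed under one value. The meta.Accessor call and pod value are assumptions for illustration.

// Hypothetical usage; assumes objects carry ObjectMeta and meta is imported.
store := NewThreadSafeStore(Indexers{
	"byNamespace": func(obj interface{}) ([]string, error) {
		m, err := meta.Accessor(obj)
		if err != nil {
			return nil, err
		}
		return []string{m.GetNamespace()}, nil
	},
}, Indices{})
store.Add("default/my-pod", pod) // updateIndices files this key under "default"
inDefault, _ := store.ByIndex("byNamespace", "default")
_ = inDefault // all cached objects in namespace "default"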

@@ -0,0 +1,83 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

// UndeltaStore listens to incremental updates and sends complete state on every change.
// It implements the Store interface so that it can receive a stream of mirrored objects
// from Reflector. Whenever it receives any complete (Store.Replace) or incremental change
// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc.
// It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results
// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values.
// PushFunc should be thread safe.
type UndeltaStore struct {
	Store
	PushFunc func([]interface{})
}

// Assert that it implements the Store interface.
var _ Store = &UndeltaStore{}

// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods.
// In the functions below, the lock gets released and reacquired between the {Add,Delete,etc}
// and the List. So, the following can happen, resulting in two identical calls to PushFunc.
// time  thread 1               thread 2
// 0     UndeltaStore.Add(a)
// 1                            UndeltaStore.Add(b)
// 2     Store.Add(a)
// 3                            Store.Add(b)
// 4     Store.List() -> [a,b]
// 5                            Store.List() -> [a,b]

func (u *UndeltaStore) Add(obj interface{}) error {
	if err := u.Store.Add(obj); err != nil {
		return err
	}
	u.PushFunc(u.Store.List())
	return nil
}

func (u *UndeltaStore) Update(obj interface{}) error {
	if err := u.Store.Update(obj); err != nil {
		return err
	}
	u.PushFunc(u.Store.List())
	return nil
}

func (u *UndeltaStore) Delete(obj interface{}) error {
	if err := u.Store.Delete(obj); err != nil {
		return err
	}
	u.PushFunc(u.Store.List())
	return nil
}

func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error {
	if err := u.Store.Replace(list, resourceVersion); err != nil {
		return err
	}
	u.PushFunc(u.Store.List())
	return nil
}

// NewUndeltaStore returns an UndeltaStore implemented with a Store.
func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore {
	return &UndeltaStore{
		Store:    NewStore(keyFunc),
		PushFunc: pushFunc,
	}
}
|
|
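A minimal sketch (not part of this diff) of UndeltaStore in use. The string-keyed key function and the printed counts are illustrative only; NewUndeltaStore and the push-on-every-change behavior are exactly what the file above defines.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// invoked with the complete store contents after every mutation
	push := func(state []interface{}) {
		fmt.Printf("state now holds %d objects\n", len(state))
	}
	// trivial key function for plain-string objects (illustrative only)
	keyFunc := func(obj interface{}) (string, error) {
		return obj.(string), nil
	}
	store := cache.NewUndeltaStore(push, keyFunc)

	_ = store.Add("a")    // push sees [a]
	_ = store.Add("b")    // push sees [a b]
	_ = store.Delete("a") // push sees [b]
}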
@ -0,0 +1,118 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pager

import (
	"fmt"

	"golang.org/x/net/context"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

const defaultPageSize = 500

// ListPageFunc returns a list object for the given list options.
type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error)

// SimplePageFunc adapts a context-less list function into one that accepts a context.
func SimplePageFunc(fn func(opts metav1.ListOptions) (runtime.Object, error)) ListPageFunc {
	return func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		return fn(opts)
	}
}

// ListPager assists client code in breaking large list queries into multiple
// smaller chunks of PageSize or smaller. PageFn is expected to accept a
// metav1.ListOptions that supports paging and return a list. The pager does
// not alter the field or label selectors on the initial options list.
type ListPager struct {
	PageSize int64
	PageFn   ListPageFunc

	FullListIfExpired bool
}

// New creates a new pager from the provided pager function using the default
// options. It will fall back to a full list if an expiration error is encountered
// as a last resort.
func New(fn ListPageFunc) *ListPager {
	return &ListPager{
		PageSize:          defaultPageSize,
		PageFn:            fn,
		FullListIfExpired: true,
	}
}

// TODO: introduce other types of paging functions - such as those that retrieve from a list
// of namespaces.

// List returns a single list object, but attempts to retrieve smaller chunks from the
// server to reduce the impact on the server. If the chunk attempt fails, it will load
// the full list instead. The Limit field on options, if unset, will default to the page size.
func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
	if options.Limit == 0 {
		options.Limit = p.PageSize
	}
	var list *metainternalversion.List
	for {
		obj, err := p.PageFn(ctx, options)
		if err != nil {
			if !errors.IsResourceExpired(err) || !p.FullListIfExpired {
				return nil, err
			}
			// the list expired while we were processing, fall back to a full list
			options.Limit = 0
			options.Continue = ""
			return p.PageFn(ctx, options)
		}
		m, err := meta.ListAccessor(obj)
		if err != nil {
			return nil, fmt.Errorf("returned object must be a list: %v", err)
		}

		// exit early and return the object we got if we haven't processed any pages
		if len(m.GetContinue()) == 0 && list == nil {
			return obj, nil
		}

		// initialize the list and fill its contents
		if list == nil {
			list = &metainternalversion.List{Items: make([]runtime.Object, 0, options.Limit+1)}
			list.ResourceVersion = m.GetResourceVersion()
			list.SelfLink = m.GetSelfLink()
		}
		if err := meta.EachListItem(obj, func(obj runtime.Object) error {
			list.Items = append(list.Items, obj)
			return nil
		}); err != nil {
			return nil, err
		}

		// if we have no more items, return the list
		if len(m.GetContinue()) == 0 {
			return list, nil
		}

		// set the next loop up
		options.Continue = m.GetContinue()
	}
}
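For context, a minimal sketch (not part of this diff) of driving the pager against a typed clientset. The listAllPods name and the namespace parameter are assumptions, and the sketch relies on the pre-context List signature of the client-go generation vendored here; New, SimplePageFunc, and List are the pager APIs defined above.

package example

import (
	"golang.org/x/net/context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

// listAllPods assembles all pods in a namespace, fetched in chunks of the
// default page size (500), falling back to a full list if the continue
// token expires mid-walk.
func listAllPods(client kubernetes.Interface, namespace string) (runtime.Object, error) {
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		// each invocation fetches one page; the pager sets Limit and Continue
		return client.CoreV1().Pods(namespace).List(opts)
	}))
	return p.List(context.Background(), metav1.ListOptions{})
}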