update vendor.conf

Signed-off-by: Tibor Vass <tibor@docker.com>
Tibor Vass 2017-05-03 19:27:10 -07:00
parent 03a36f9d50
commit b141fa3799
28 changed files with 685 additions and 3116 deletions

View File

@ -6,7 +6,7 @@ github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
github.com/coreos/etcd 824277cb3a577a0e8c829ca9ec557b973fe06d20
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
github.com/docker/docker f33f2578881af36dc403a2063a00b19a4844fcbe
github.com/docker/docker bf5cf84534f1eac9805c6f1bffff341358806b57
github.com/docker/docker-credential-helpers v0.5.0
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06
github.com/docker/go-connections e15c02316c12de00874640cd76311849de2aeed5
@ -17,7 +17,6 @@ github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
github.com/docker/notary v0.4.2
github.com/docker/swarmkit b19d028de0a6e9ca281afeb76cea2544b9edd839
github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
github.com/gogo/protobuf 7efa791bd276fd4db00867cbd982b552627c24cb
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/gorilla/context v1.1
@ -28,6 +27,7 @@ github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
github.com/opencontainers/runc 9c2d8d184e5da67c95d601382adf14862e4f2228 https://github.com/docker/runc.git
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git
@ -43,3 +43,4 @@ golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
google.golang.org/grpc v1.0.4
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6

View File

@ -1,270 +1,80 @@
Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest)
============================
### Docker users, see [Moby and Docker](https://mobyproject.org/#moby-and-docker) to clarify the relationship between the projects
Docker is an open source project to pack, ship and run any application
as a lightweight container.
### Docker maintainers and contributors, see [Transitioning to Moby](#transitioning-to-moby) for more details
Docker containers are both *hardware-agnostic* and *platform-agnostic*.
This means they can run anywhere, from your laptop to the largest
cloud compute instance and everything in between - and they don't require
you to use a particular language, framework or packaging system. That
makes them great building blocks for deploying and scaling web apps,
databases, and backend services without depending on a particular stack
or provider.
The Moby Project
================
Docker began as an open-source implementation of the deployment engine which
powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/),
a popular Platform-as-a-Service. It benefits directly from the experience
accumulated over several years of large-scale operation and support of hundreds
of thousands of applications and databases.
![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project")
![Docker logo](docs/static_files/docker-logo-compressed.png "Docker")
Moby is an open-source project created by Docker to advance the software containerization movement.
It provides a “Lego set” of dozens of components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts to experiment and exchange ideas.
## Security Disclosure
# Moby
Security is very important to us. If you have any issue regarding security,
please disclose the information responsibly by sending an email to
security@docker.com and not by creating a GitHub issue.
## Overview
## Better than VMs
At the core of Moby is a framework to assemble specialized container systems.
It provides:
A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
these formats should allow every developer to automatically package
their application into a "machine" for easy distribution and deployment.
In practice, that almost never happens, for a few reasons:
- A library of containerized components for all vital aspects of a container system: OS, container runtime, orchestration, infrastructure management, networking, storage, security, build, image distribution, etc.
- Tools to assemble the components into runnable artifacts for a variety of platforms and architectures: bare metal (both x86 and Arm); executables for Linux, Mac and Windows; VM images for popular cloud and virtualization providers.
- A set of reference assemblies which can be used as-is, modified, or used as inspiration to create your own.
* *Size*: VMs are very large which makes them impractical to store
and transfer.
* *Performance*: running VMs consumes significant CPU and memory,
which makes them impractical in many scenarios, for example local
development of multi-tier applications, and large-scale deployment
of cpu and memory-intensive applications on large numbers of
machines.
* *Portability*: competing VM environments don't play well with each
other. Although conversion tools do exist, they are limited and
add even more overhead.
* *Hardware-centric*: VMs were designed with machine operators in
mind, not software developers. As a result, they offer very
limited tooling for what developers need most: building, testing
and running their software. For example, VMs offer no facilities
for application versioning, monitoring, configuration, logging or
service discovery.
All Moby components are containers, so creating new components is as easy as building a new OCI-compatible container.
By contrast, Docker relies on a different sandboxing method known as
*containerization*. Unlike traditional virtualization, containerization
takes place at the kernel level. Most modern operating system kernels
now support the primitives necessary for containerization, including
Linux with [openvz](https://openvz.org),
[vserver](http://linux-vserver.org) and more recently
[lxc](https://linuxcontainers.org/), Solaris with
[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
and FreeBSD with
[Jails](https://www.freebsd.org/doc/handbook/jails.html).
## Principles
Docker builds on top of these low-level primitives to offer developers a
portable format and runtime environment that solves all four problems.
Docker containers are small (and their transfer can be optimized with
layers), they have basically zero memory and cpu overhead, they are
completely portable, and are designed from the ground up with an
application-centric design.
Moby is an open project guided by strong principles, but modular, flexible and without too strong an opinion on user experience, so it is open to the community to help set its direction.
The guiding principles are:
Perhaps best of all, because Docker operates at the OS level, it can still be
run inside a VM!
- Batteries included but swappable: Moby includes enough components to build a fully featured container system, but its modular architecture ensures that most of the components can be swapped out for different implementations.
- Usable security: Moby will provide secure defaults without compromising usability.
- Container centric: Moby is built with containers, for running containers.
## Plays well with others
With Moby, you should be able to describe all the components of your distributed application, from the high-level configuration files down to the kernel you would like to use, and build and deploy it easily.
Docker does not require you to buy into a particular programming
language, framework, packaging system, or configuration language.
Moby uses [containerd](https://github.com/containerd/containerd) as the default container runtime.
Is your application a Unix process? Does it use files, tcp connections,
environment variables, standard Unix streams and command-line arguments
as inputs and outputs? Then Docker can run it.
## Audience
Can your application's build be expressed as a sequence of such
commands? Then Docker can build it.
Moby is recommended for anyone who wants to assemble a container-based system. This includes:
## Escape dependency hell
- Hackers who want to customize or patch their Docker build
- System engineers or integrators building a container system
- Infrastructure providers looking to adapt existing container systems to their environment
- Container enthusiasts who want to experiment with the latest container tech
- Open-source developers looking to test their project in a variety of different systems
- Anyone curious about Docker internals and how it's built
A common problem for developers is the difficulty of managing all
their application's dependencies in a simple and automated way.
Moby is NOT recommended for:
This is usually difficult for several reasons:
- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead.
- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead.
- Anyone curious about containers and looking for an easy way to learn. We recommend the docker.com website instead.
* *Cross-platform dependencies*. Modern applications often depend on
a combination of system libraries and binaries, language-specific
packages, framework-specific modules, internal components
developed for another project, etc. These dependencies live in
different "worlds" and require different tools - these tools
typically don't work well with each other, requiring awkward
custom integrations.
# Transitioning to Moby
* *Conflicting dependencies*. Different applications may depend on
different versions of the same dependency. Packaging tools handle
these situations with various degrees of ease - but they all
handle them in different and incompatible ways, which again forces
the developer to do extra work.
Docker is transitioning all of its open source collaborations to the Moby project going forward.
During the transition, all open source activity should continue as usual.
* *Custom dependencies*. A developer may need to prepare a custom
version of their application's dependency. Some packaging systems
can handle custom versions of a dependency, others can't - and all
of them handle it differently.
We are proposing the following list of changes:
- splitting up the engine into more open components
- removing the docker UI, SDK etc to keep them in the Docker org
- clarifying that the project is not limited to the engine, but covers the assembly of all the individual components of the Docker platform
- open-sourcing new tools & components which we currently use to assemble the Docker product, but which could benefit the community
- defining an open, community-centric governance inspired by the Fedora project (a very successful example of balancing the needs of the community with the constraints of the primary corporate sponsor)
Docker solves the problem of dependency hell by giving developers a simple
way to express *all* their application's dependencies in one place, while
streamlining the process of assembling them. If this makes you think of
[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates
their use in a simple and repeatable way. How does it do that? With
layers.
-----
Docker defines a build as running a sequence of Unix commands, one
after the other, in the same container. Build commands modify the
contents of the container (usually by installing new files on the
filesystem), the next command modifies it some more, etc. Since each
build command inherits the result of the previous commands, the
*order* in which the commands are executed expresses *dependencies*.
Here's a typical Docker build process:
```dockerfile
FROM ubuntu:12.04
RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
```
Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.
Getting started
===============
Docker can be installed either on your computer for building applications or
on servers for running them. To get started, [check out the installation
instructions in the
documentation](https://docs.docker.com/engine/installation/).
Usage examples
==============
Docker can be used to run short-lived commands, long-running daemons
(app servers, databases, etc.), interactive shell sessions, etc.
You can find a [list of real-world
examples](https://docs.docker.com/engine/examples/) in the
documentation.
Under the hood
--------------
Under the hood, Docker is built on the following components:
* The
[cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt)
and
[namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html)
capabilities of the Linux kernel
* The [Go](https://golang.org) programming language
* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md)
Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker)
======================
| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** |
|------------------|----------------------|---------|---------|
| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) |
Want to hack on Docker? Awesome! We have [instructions to help you get
started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/).
These instructions are probably not perfect; please let us know if anything
feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
Getting the development builds
==============================
Want to run Docker from a master build? You can download
master builds at [master.dockerproject.org](https://master.dockerproject.org).
They are updated with each commit merged into the master branch.
Don't know how to use that super cool new feature in the master build? Check
out the master docs at
[docs.master.dockerproject.org](http://docs.master.dockerproject.org).
How the project is run
======================
Docker is a very, very active project. If you want to learn more about how it is run,
or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).
We are always open to suggestions on process improvements, and are always looking for more maintainers.
### Talking to other Docker users and contributors
<table class="tg">
<col width="45%">
<col width="65%">
<tr>
<td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
<td>
<p>
IRC is a direct line to our most knowledgeable Docker users; we have
both the <code>#docker</code> and <code>#docker-dev</code> group on
<strong>irc.freenode.net</strong>.
IRC is a rich chat protocol but it can overwhelm new users. You can search
<a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
</p>
Read our <a href="https://docs.docker.com/opensource/get-help/#/irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
</td>
</tr>
<tr>
<td>Docker Community Forums</td>
<td>
The <a href="https://forums.docker.com/c/open-source-projects/de" target="_blank">Docker Engine</a>
group is for users of the Docker Engine project.
</td>
</tr>
<tr>
<td>Google Groups</td>
<td>
The <a href="https://groups.google.com/forum/#!forum/docker-dev"
target="_blank">docker-dev</a> group is for contributors and other people
contributing to the Docker project. You can join this group without a
Google account by sending an email to <a
href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
You'll receive a join-request message; simply reply to the message to
confirm your subscription.
</td>
</tr>
<tr>
<td>Twitter</td>
<td>
You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
to get updates on our products. You can also tweet us questions or just
share blogs or stories.
</td>
</tr>
<tr>
<td>Stack Overflow</td>
<td>
Stack Overflow has thousands of Docker questions listed. We regularly
monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
and so do many other knowledgeable Docker users.
</td>
</tr>
</table>
### Legal
Legal
=====
*Brought to you courtesy of our legal counsel. For more context,
please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*
please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.*
Use and transfer of Docker may be subject to certain restrictions by the
Use and transfer of Moby may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
@ -275,30 +85,6 @@ For more information, please see https://www.bis.doc.gov
Licensing
=========
Docker is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
Moby is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full
license text.
Other Docker Related Projects
=============================
There are a number of projects under development that are based on Docker's
core technology. These projects expand the tooling built around the
Docker platform to broaden its application and utility.
* [Docker Registry](https://github.com/docker/distribution): Registry
server for Docker (hosting/delivery of repositories and images)
* [Docker Machine](https://github.com/docker/machine): Machine management
for a container-centric world
* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering
system
* [Docker Compose](https://github.com/docker/compose) (formerly Fig):
Define and run multi-container apps
* [Kitematic](https://github.com/docker/kitematic): The easiest way to use
Docker on Mac and Windows
If you know of another project underway that should be listed here, please help
us keep this list up-to-date by submitting a PR.
Awesome-Docker
==============
You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there.

View File

@ -14,8 +14,8 @@ It consists of various components in this repository:
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
1. To automatically generate documentation.
2. To automatically generate the Go server and client. (A work-in-progress.)
1. Automatically generate documentation.
2. Automatically generate the Go server and client. (A work-in-progress.)
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
## Updating the API documentation

View File

@ -7,6 +7,12 @@ import (
"github.com/docker/go-connections/nat"
)
// MinimumDuration puts a minimum on user configured duration.
// This is to prevent API errors caused by ambiguous time units. For
// example, an API caller may set 3 as a healthcheck interval intending
// 3 seconds, but Docker would interpret it as 3 nanoseconds.
const MinimumDuration = 1 * time.Millisecond
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
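
The new MinimumDuration constant guards against durations that were passed without a time unit. Below is a minimal sketch of the kind of validation it enables; the Interval and Timeout fields come from the full HealthConfig type, which this hunk only shows in part, and the helper name is hypothetical.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

// validateHealthConfig sketches the kind of check MinimumDuration enables.
func validateHealthConfig(hc *container.HealthConfig) error {
	if hc.Interval != 0 && hc.Interval < container.MinimumDuration {
		return fmt.Errorf("Interval in Healthcheck cannot be less than %s", container.MinimumDuration)
	}
	if hc.Timeout != 0 && hc.Timeout < container.MinimumDuration {
		return fmt.Errorf("Timeout in Healthcheck cannot be less than %s", container.MinimumDuration)
	}
	return nil
}

func main() {
	// 3 with no unit is 3 nanoseconds, almost certainly a unit mistake.
	fmt.Println(validateHealthConfig(&container.HealthConfig{Interval: 3}))
}
```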

View File

@ -377,7 +377,4 @@ type HostConfig struct {
// Run a custom init inside the container, if null, use the daemon's configured settings
Init *bool `json:",omitempty"`
// Custom init path
InitPath string `json:",omitempty"`
}

View File

@ -126,12 +126,17 @@ type ExternalCA struct {
// Options is a set of additional key/value pairs whose interpretation
// depends on the specified CA type.
Options map[string]string `json:",omitempty"`
// CACert specifies which root CA is used by this external CA. This certificate must
// be in PEM format.
CACert string
}
// InitRequest is the request used to init a swarm.
type InitRequest struct {
ListenAddr string
AdvertiseAddr string
DataPathAddr string
ForceNewCluster bool
Spec Spec
AutoLockManagers bool
@ -142,6 +147,7 @@ type InitRequest struct {
type JoinRequest struct {
ListenAddr string
AdvertiseAddr string
DataPathAddr string
RemoteAddrs []string
JoinToken string // accept by secret
Availability NodeAvailability

View File

@ -238,6 +238,8 @@ type PluginsInfo struct {
Network []string
// List of Authorization plugins registered
Authorization []string
// List of Log plugins registered
Log []string
}
// ExecStartCheck is a temp struct used by execStart

View File

@ -10,91 +10,76 @@ import (
"github.com/docker/docker/pkg/progress"
)
// StreamFormatter formats a stream, optionally using JSON.
type StreamFormatter struct {
json bool
}
// NewStreamFormatter returns a simple StreamFormatter
func NewStreamFormatter() *StreamFormatter {
return &StreamFormatter{}
}
// NewJSONStreamFormatter returns a StreamFormatter configured to stream json
func NewJSONStreamFormatter() *StreamFormatter {
return &StreamFormatter{true}
}
const streamNewline = "\r\n"
var streamNewlineBytes = []byte(streamNewline)
type jsonProgressFormatter struct{}
// FormatStream formats the specified stream.
func (sf *StreamFormatter) FormatStream(str string) []byte {
if sf.json {
b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str})
if err != nil {
return sf.FormatError(err)
}
return append(b, streamNewlineBytes...)
}
return []byte(str + "\r")
func appendNewline(source []byte) []byte {
return append(source, []byte(streamNewline)...)
}
// FormatStatus formats the specified objects according to the specified format (and id).
func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte {
func FormatStatus(id, format string, a ...interface{}) []byte {
str := fmt.Sprintf(format, a...)
if sf.json {
b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str})
if err != nil {
return sf.FormatError(err)
}
return append(b, streamNewlineBytes...)
b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str})
if err != nil {
return FormatError(err)
}
return []byte(str + streamNewline)
return appendNewline(b)
}
// FormatError formats the specified error.
func (sf *StreamFormatter) FormatError(err error) []byte {
if sf.json {
jsonError, ok := err.(*jsonmessage.JSONError)
if !ok {
jsonError = &jsonmessage.JSONError{Message: err.Error()}
}
if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil {
return append(b, streamNewlineBytes...)
}
return []byte("{\"error\":\"format error\"}" + streamNewline)
// FormatError formats the error as a JSON object
func FormatError(err error) []byte {
jsonError, ok := err.(*jsonmessage.JSONError)
if !ok {
jsonError = &jsonmessage.JSONError{Message: err.Error()}
}
return []byte("Error: " + err.Error() + streamNewline)
if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil {
return appendNewline(b)
}
return []byte(`{"error":"format error"}` + streamNewline)
}
// FormatProgress formats the progress information for a specified action.
func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte {
return FormatStatus(id, format, a...)
}
// formatProgress formats the progress information for a specified action.
func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
if progress == nil {
progress = &jsonmessage.JSONProgress{}
}
if sf.json {
var auxJSON *json.RawMessage
if aux != nil {
auxJSONBytes, err := json.Marshal(aux)
if err != nil {
return nil
}
auxJSON = new(json.RawMessage)
*auxJSON = auxJSONBytes
}
b, err := json.Marshal(&jsonmessage.JSONMessage{
Status: action,
ProgressMessage: progress.String(),
Progress: progress,
ID: id,
Aux: auxJSON,
})
var auxJSON *json.RawMessage
if aux != nil {
auxJSONBytes, err := json.Marshal(aux)
if err != nil {
return nil
}
return append(b, streamNewlineBytes...)
auxJSON = new(json.RawMessage)
*auxJSON = auxJSONBytes
}
b, err := json.Marshal(&jsonmessage.JSONMessage{
Status: action,
ProgressMessage: progress.String(),
Progress: progress,
ID: id,
Aux: auxJSON,
})
if err != nil {
return nil
}
return appendNewline(b)
}
type rawProgressFormatter struct{}
func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte {
return []byte(fmt.Sprintf(format, a...) + streamNewline)
}
func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
if progress == nil {
progress = &jsonmessage.JSONProgress{}
}
endl := "\r"
if progress.String() == "" {
@ -105,16 +90,23 @@ func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessa
// NewProgressOutput returns a progress.Output object that can be passed to
// progress.NewProgressReader.
func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output {
return &progressOutput{
sf: sf,
out: out,
newLines: newLines,
}
func NewProgressOutput(out io.Writer) progress.Output {
return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true}
}
// NewJSONProgressOutput returns a progress.Output that formats output
// using JSON objects
func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output {
return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines}
}
type formatProgress interface {
formatStatus(id, format string, a ...interface{}) []byte
formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte
}
type progressOutput struct {
sf *StreamFormatter
sf formatProgress
out io.Writer
newLines bool
}
@ -123,10 +115,10 @@ type progressOutput struct {
func (out *progressOutput) WriteProgress(prog progress.Progress) error {
var formatted []byte
if prog.Message != "" {
formatted = out.sf.FormatStatus(prog.ID, prog.Message)
formatted = out.sf.formatStatus(prog.ID, prog.Message)
} else {
jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts}
formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux)
formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux)
}
_, err := out.out.Write(formatted)
if err != nil {
@ -134,39 +126,9 @@ func (out *progressOutput) WriteProgress(prog progress.Progress) error {
}
if out.newLines && prog.LastUpdate {
_, err = out.out.Write(out.sf.FormatStatus("", ""))
_, err = out.out.Write(out.sf.formatStatus("", ""))
return err
}
return nil
}
// StdoutFormatter is a streamFormatter that writes to the standard output.
type StdoutFormatter struct {
io.Writer
*StreamFormatter
}
func (sf *StdoutFormatter) Write(buf []byte) (int, error) {
formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
n, err := sf.Writer.Write(formattedBuf)
if n != len(formattedBuf) {
return n, io.ErrShortWrite
}
return len(buf), err
}
// StderrFormatter is a streamFormatter that writes to the standard error.
type StderrFormatter struct {
io.Writer
*StreamFormatter
}
func (sf *StderrFormatter) Write(buf []byte) (int, error) {
formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
n, err := sf.Writer.Write(formattedBuf)
if n != len(formattedBuf) {
return n, io.ErrShortWrite
}
return len(buf), err
}
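
This refactor replaces the StreamFormatter struct with package-level helpers and two internal progress formatters. A minimal usage sketch, assuming only the functions and vendored import paths visible in this diff:

```go
package main

import (
	"os"

	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
)

func main() {
	// JSON progress output, as the daemon streams to API clients.
	out := streamformatter.NewJSONProgressOutput(os.Stdout, false)

	// Emits a JSON message carrying the id, action and progress detail.
	_ = out.WriteProgress(progress.Progress{
		ID:      "layer1",
		Action:  "Downloading",
		Current: 10,
		Total:   100,
	})

	// Package-level helpers replace the old StreamFormatter methods.
	os.Stdout.Write(streamformatter.FormatStatus("layer1", "Pull complete"))
}
```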

View File

@ -0,0 +1,47 @@
package streamformatter
import (
"encoding/json"
"io"
"github.com/docker/docker/pkg/jsonmessage"
)
type streamWriter struct {
io.Writer
lineFormat func([]byte) string
}
func (sw *streamWriter) Write(buf []byte) (int, error) {
formattedBuf := sw.format(buf)
n, err := sw.Writer.Write(formattedBuf)
if n != len(formattedBuf) {
return n, io.ErrShortWrite
}
return len(buf), err
}
func (sw *streamWriter) format(buf []byte) []byte {
msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)}
b, err := json.Marshal(msg)
if err != nil {
return FormatError(err)
}
return appendNewline(b)
}
// NewStdoutWriter returns a writer which formats the output as json message
// representing stdout lines
func NewStdoutWriter(out io.Writer) io.Writer {
return &streamWriter{Writer: out, lineFormat: func(buf []byte) string {
return string(buf)
}}
}
// NewStderrWriter returns a writer which formats the output as json message
// representing stderr lines
func NewStderrWriter(out io.Writer) io.Writer {
return &streamWriter{Writer: out, lineFormat: func(buf []byte) string {
return "\033[91m" + string(buf) + "\033[0m"
}}
}
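
The new NewStdoutWriter/NewStderrWriter helpers take over from the removed StdoutFormatter/StderrFormatter types. A small sketch of wrapping a destination writer, again assuming the import path shown above:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/streamformatter"
)

func main() {
	// Wrap a destination writer (os.Stdout here; in the daemon this would be
	// the API response stream) so every write becomes a JSON "stream" message.
	stdout := streamformatter.NewStdoutWriter(os.Stdout)
	stderr := streamformatter.NewStderrWriter(os.Stdout) // stderr lines are wrapped in ANSI red

	fmt.Fprintln(stdout, "Step 1/3 : FROM alpine")
	fmt.Fprintln(stderr, "warning: cache miss")
}
```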

View File

@ -1,307 +0,0 @@
package cmd
import (
"bytes"
"fmt"
"io"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/docker/docker/pkg/system"
"github.com/go-check/check"
)
type testingT interface {
Fatalf(string, ...interface{})
}
const (
// None is a token to inform Result.Assert that the output should be empty
None string = "<NOTHING>"
)
type lockedBuffer struct {
m sync.RWMutex
buf bytes.Buffer
}
func (buf *lockedBuffer) Write(b []byte) (int, error) {
buf.m.Lock()
defer buf.m.Unlock()
return buf.buf.Write(b)
}
func (buf *lockedBuffer) String() string {
buf.m.RLock()
defer buf.m.RUnlock()
return buf.buf.String()
}
// Result stores the result of running a command
type Result struct {
Cmd *exec.Cmd
ExitCode int
Error error
// Timeout is true if the command was killed because it ran for too long
Timeout bool
outBuffer *lockedBuffer
errBuffer *lockedBuffer
}
// Assert compares the Result against the Expected struct, and fails the test if
// any of the expectations are not met.
func (r *Result) Assert(t testingT, exp Expected) *Result {
err := r.Compare(exp)
if err == nil {
return r
}
_, file, line, ok := runtime.Caller(1)
if ok {
t.Fatalf("at %s:%d - %s", filepath.Base(file), line, err.Error())
} else {
t.Fatalf("(no file/line info) - %s", err.Error())
}
return nil
}
// Compare returns a formatted error with the command, stdout, stderr, exit
// code, and any failed expectations
func (r *Result) Compare(exp Expected) error {
errors := []string{}
add := func(format string, args ...interface{}) {
errors = append(errors, fmt.Sprintf(format, args...))
}
if exp.ExitCode != r.ExitCode {
add("ExitCode was %d expected %d", r.ExitCode, exp.ExitCode)
}
if exp.Timeout != r.Timeout {
if exp.Timeout {
add("Expected command to timeout")
} else {
add("Expected command to finish, but it hit the timeout")
}
}
if !matchOutput(exp.Out, r.Stdout()) {
add("Expected stdout to contain %q", exp.Out)
}
if !matchOutput(exp.Err, r.Stderr()) {
add("Expected stderr to contain %q", exp.Err)
}
switch {
// If a non-zero exit code is expected there is going to be an error.
// Don't require an error message as well as an exit code because the
// error message is going to be "exit status <code>", which is not useful
case exp.Error == "" && exp.ExitCode != 0:
case exp.Error == "" && r.Error != nil:
add("Expected no error")
case exp.Error != "" && r.Error == nil:
add("Expected error to contain %q, but there was no error", exp.Error)
case exp.Error != "" && !strings.Contains(r.Error.Error(), exp.Error):
add("Expected error to contain %q", exp.Error)
}
if len(errors) == 0 {
return nil
}
return fmt.Errorf("%s\nFailures:\n%s\n", r, strings.Join(errors, "\n"))
}
func matchOutput(expected string, actual string) bool {
switch expected {
case None:
return actual == ""
default:
return strings.Contains(actual, expected)
}
}
func (r *Result) String() string {
var timeout string
if r.Timeout {
timeout = " (timeout)"
}
return fmt.Sprintf(`
Command: %s
ExitCode: %d%s
Error: %v
Stdout: %v
Stderr: %v
`,
strings.Join(r.Cmd.Args, " "),
r.ExitCode,
timeout,
r.Error,
r.Stdout(),
r.Stderr())
}
// Expected is the expected output from a Command. This struct is compared to a
// Result struct by Result.Assert().
type Expected struct {
ExitCode int
Timeout bool
Error string
Out string
Err string
}
// Success is the default expected result
var Success = Expected{}
// Stdout returns the stdout of the process as a string
func (r *Result) Stdout() string {
return r.outBuffer.String()
}
// Stderr returns the stderr of the process as a string
func (r *Result) Stderr() string {
return r.errBuffer.String()
}
// Combined returns the stdout and stderr combined into a single string
func (r *Result) Combined() string {
return r.outBuffer.String() + r.errBuffer.String()
}
// SetExitError sets Error and ExitCode based on Error
func (r *Result) SetExitError(err error) {
if err == nil {
return
}
r.Error = err
r.ExitCode = system.ProcessExitCode(err)
}
type matches struct{}
// Info returns the CheckerInfo
func (m *matches) Info() *check.CheckerInfo {
return &check.CheckerInfo{
Name: "CommandMatches",
Params: []string{"result", "expected"},
}
}
// Check compares a result against the expected
func (m *matches) Check(params []interface{}, names []string) (bool, string) {
result, ok := params[0].(*Result)
if !ok {
return false, fmt.Sprintf("result must be a *Result, not %T", params[0])
}
expected, ok := params[1].(Expected)
if !ok {
return false, fmt.Sprintf("expected must be an Expected, not %T", params[1])
}
err := result.Compare(expected)
if err == nil {
return true, ""
}
return false, err.Error()
}
// Matches is a gocheck.Checker for comparing a Result against an Expected
var Matches = &matches{}
// Cmd contains the arguments and options for a process to run as part of a test
// suite.
type Cmd struct {
Command []string
Timeout time.Duration
Stdin io.Reader
Stdout io.Writer
Dir string
Env []string
}
// Command creates a simple Cmd with the specified command and arguments
func Command(command string, args ...string) Cmd {
return Cmd{Command: append([]string{command}, args...)}
}
// RunCmd runs a command and returns a Result
func RunCmd(cmd Cmd, cmdOperators ...func(*Cmd)) *Result {
for _, op := range cmdOperators {
op(&cmd)
}
result := StartCmd(cmd)
if result.Error != nil {
return result
}
return WaitOnCmd(cmd.Timeout, result)
}
// RunCommand parses a command line and runs it, returning a result
func RunCommand(command string, args ...string) *Result {
return RunCmd(Command(command, args...))
}
// StartCmd starts a command, but doesn't wait for it to finish
func StartCmd(cmd Cmd) *Result {
result := buildCmd(cmd)
if result.Error != nil {
return result
}
result.SetExitError(result.Cmd.Start())
return result
}
func buildCmd(cmd Cmd) *Result {
var execCmd *exec.Cmd
switch len(cmd.Command) {
case 1:
execCmd = exec.Command(cmd.Command[0])
default:
execCmd = exec.Command(cmd.Command[0], cmd.Command[1:]...)
}
outBuffer := new(lockedBuffer)
errBuffer := new(lockedBuffer)
execCmd.Stdin = cmd.Stdin
execCmd.Dir = cmd.Dir
execCmd.Env = cmd.Env
if cmd.Stdout != nil {
execCmd.Stdout = io.MultiWriter(outBuffer, cmd.Stdout)
} else {
execCmd.Stdout = outBuffer
}
execCmd.Stderr = errBuffer
return &Result{
Cmd: execCmd,
outBuffer: outBuffer,
errBuffer: errBuffer,
}
}
// WaitOnCmd waits for a command to complete. If timeout is non-zero then
// wait only until the timeout.
func WaitOnCmd(timeout time.Duration, result *Result) *Result {
if timeout == time.Duration(0) {
result.SetExitError(result.Cmd.Wait())
return result
}
done := make(chan error, 1)
// Wait for command to exit in a goroutine
go func() {
done <- result.Cmd.Wait()
}()
select {
case <-time.After(timeout):
killErr := result.Cmd.Process.Kill()
if killErr != nil {
fmt.Printf("failed to kill (pid=%d): %v\n", result.Cmd.Process.Pid, killErr)
}
result.Timeout = true
case err := <-done:
result.SetExitError(err)
}
return result
}
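
For context, this is how tests typically consumed the helper package removed above, using only the API visible in the deleted file; the import path is assumed from the following hunk, which drops its use.

```go
package cmd_test

import (
	"testing"
	"time"

	cmd "github.com/docker/docker/pkg/testutil/cmd"
)

func TestRunCommandSketch(t *testing.T) {
	// Run a command and assert on its exit code and stdout.
	res := cmd.RunCommand("echo", "hello")
	res.Assert(t, cmd.Expected{ExitCode: 0, Out: "hello"})

	// A command that exceeds its timeout is killed and flagged as such.
	res = cmd.RunCmd(cmd.Cmd{
		Command: []string{"sleep", "10"},
		Timeout: 50 * time.Millisecond,
	})
	res.Assert(t, cmd.Expected{Timeout: true})
}
```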

View File

@ -16,7 +16,6 @@ import (
"github.com/docker/docker/pkg/stringutils"
"github.com/docker/docker/pkg/system"
icmd "github.com/docker/docker/pkg/testutil/cmd"
)
// IsKilled process the specified error and returns whether the process was killed or not.
@ -212,20 +211,6 @@ func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
}
}
// RunAtDifferentDate runs the specified function with the given time.
// It changes the date of the system, which can lead to weird behaviors.
func RunAtDifferentDate(date time.Time, block func()) {
// Layout for date. MMDDhhmmYYYY
const timeLayout = "010203042006"
// Ensure we bring time back to now
now := time.Now().Format(timeLayout)
defer icmd.RunCommand("date", now)
icmd.RunCommand("date", date.Format(timeLayout))
block()
return
}
// ReadBody reads the specified ReadCloser content and returns it
func ReadBody(b io.ReadCloser) ([]byte, error) {
defer b.Close()

View File

@ -1,8 +1,7 @@
# the following lines are in sorted order, FYI
github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62
github.com/Microsoft/hcsshim v0.5.13
# TODO: get rid of this fork once PR https://github.com/Microsoft/go-winio/pull/43 is merged
github.com/Microsoft/go-winio 7c7d6b461cb10872c1138a0d7f3acf9a41b5c353 https://github.com/dgageot/go-winio.git
github.com/Microsoft/go-winio v0.3.9
github.com/Sirupsen/logrus v0.11.0
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
@ -23,14 +22,17 @@ github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
github.com/imdario/mergo 0.2.1
golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
#get libnetwork packages
github.com/docker/libnetwork b13e0604016a4944025aaff521d9c125850b0d04
github.com/docker/libnetwork cace103704768d39bd88a23d0df76df125a0e39a
github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37
github.com/hashicorp/memberlist v0.1.0
github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
@ -106,7 +108,7 @@ github.com/docker/containerd 9048e5e50717ea4497b757314bad98ea3763c145
github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
# cluster
github.com/docker/swarmkit b19d028de0a6e9ca281afeb76cea2544b9edd839
github.com/docker/swarmkit 61a92e8ec074df5769decda985df4a3ab43c77eb
github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
@ -142,3 +144,4 @@ github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d

View File

@ -10,7 +10,7 @@ import (
mounttypes "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stringid"
"github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
@ -311,10 +311,12 @@ func ParseMountSpec(cfg mounttypes.Mount, options ...func(*validateOpts)) (*Moun
}
case mounttypes.TypeBind:
mp.Source = clean(convertSlash(cfg.Source))
if cfg.BindOptions != nil {
if len(cfg.BindOptions.Propagation) > 0 {
mp.Propagation = cfg.BindOptions.Propagation
}
if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 {
mp.Propagation = cfg.BindOptions.Propagation
} else {
// If user did not specify a propagation mode, get
// default propagation mode.
mp.Propagation = DefaultPropagationMode
}
case mounttypes.TypeTmpfs:
// NOP
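
The change above means a bind mount without BindOptions now falls back to the default propagation mode rather than staying empty. A hedged sketch of the effect; the volume import path is an assumption, while the ParseMountSpec signature is taken from the hunk itself:

```go
package main

import (
	"fmt"

	mounttypes "github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/volume"
)

func main() {
	// A bind mount spec with no BindOptions: the parsed MountPoint's
	// Propagation falls back to DefaultPropagationMode.
	mp, err := volume.ParseMountSpec(mounttypes.Mount{
		Type:   mounttypes.TypeBind,
		Source: "/var/lib/data",
		Target: "/data",
	})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("propagation:", mp.Propagation)
}
```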

View File

@ -1,25 +0,0 @@
Gocheck - A rich testing framework for Go
Copyright (c) 2010-2013 Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,10 +0,0 @@
Go-check
========
This is a fork of https://github.com/go-check/check
The intention of this fork is not to change any of the original behavior, but to add
some specific behaviors needed for some of my projects already using this test suite.
For documentation on the main behavior of go-check see the aforementioned repo.
The original branch is intact at `orig_v1`

View File

@ -1,187 +0,0 @@
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package check
import (
"fmt"
"runtime"
"time"
)
var memStats runtime.MemStats
// timer manages benchmark timing and the number of iterations to run
// (adapted from the testing.B type in Go's testing package).
type timer struct {
start time.Time // Time test or benchmark started
duration time.Duration
N int
bytes int64
timerOn bool
benchTime time.Duration
// The initial states of memStats.Mallocs and memStats.TotalAlloc.
startAllocs uint64
startBytes uint64
// The net total of this test after being run.
netAllocs uint64
netBytes uint64
}
// StartTimer starts timing a test. This function is called automatically
// before a benchmark starts, but it can also be used to resume timing after
// a call to StopTimer.
func (c *C) StartTimer() {
if !c.timerOn {
c.start = time.Now()
c.timerOn = true
runtime.ReadMemStats(&memStats)
c.startAllocs = memStats.Mallocs
c.startBytes = memStats.TotalAlloc
}
}
// StopTimer stops timing a test. This can be used to pause the timer
// while performing complex initialization that you don't
// want to measure.
func (c *C) StopTimer() {
if c.timerOn {
c.duration += time.Now().Sub(c.start)
c.timerOn = false
runtime.ReadMemStats(&memStats)
c.netAllocs += memStats.Mallocs - c.startAllocs
c.netBytes += memStats.TotalAlloc - c.startBytes
}
}
// ResetTimer sets the elapsed benchmark time to zero.
// It does not affect whether the timer is running.
func (c *C) ResetTimer() {
if c.timerOn {
c.start = time.Now()
runtime.ReadMemStats(&memStats)
c.startAllocs = memStats.Mallocs
c.startBytes = memStats.TotalAlloc
}
c.duration = 0
c.netAllocs = 0
c.netBytes = 0
}
// SetBytes records the number of bytes that the benchmark processes
// on each iteration. If this is called in a benchmark it will also
// report MB/s.
func (c *C) SetBytes(n int64) {
c.bytes = n
}
func (c *C) nsPerOp() int64 {
if c.N <= 0 {
return 0
}
return c.duration.Nanoseconds() / int64(c.N)
}
func (c *C) mbPerSec() float64 {
if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
return 0
}
return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
}
func (c *C) timerString() string {
if c.N <= 0 {
return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
}
mbs := c.mbPerSec()
mb := ""
if mbs != 0 {
mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
}
nsop := c.nsPerOp()
ns := fmt.Sprintf("%10d ns/op", nsop)
if c.N > 0 && nsop < 100 {
// The format specifiers here make sure that
// the ones digits line up for all three possible formats.
if nsop < 10 {
ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
} else {
ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
}
}
memStats := ""
if c.benchMem {
allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N))
allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N))
memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs)
}
return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats)
}
func min(x, y int) int {
if x > y {
return y
}
return x
}
func max(x, y int) int {
if x < y {
return y
}
return x
}
// roundDown10 rounds a number down to the nearest power of 10.
func roundDown10(n int) int {
var tens = 0
// tens = floor(log_10(n))
for n > 10 {
n = n / 10
tens++
}
// result = 10^tens
result := 1
for i := 0; i < tens; i++ {
result *= 10
}
return result
}
// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
func roundUp(n int) int {
base := roundDown10(n)
if n < (2 * base) {
return 2 * base
}
if n < (5 * base) {
return 5 * base
}
return 10 * base
}

View File

@ -1,939 +0,0 @@
// Package check is a rich testing extension for Go's testing package.
//
// For details about the project, see:
//
// http://labix.org/gocheck
//
package check
import (
"bytes"
"errors"
"fmt"
"io"
"math/rand"
"os"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// -----------------------------------------------------------------------
// Internal type which deals with suite method calling.
const (
fixtureKd = iota
testKd
)
type funcKind int
const (
succeededSt = iota
failedSt
skippedSt
panickedSt
fixturePanickedSt
missedSt
)
type funcStatus uint32
// A method value can't reach its own Method structure.
type methodType struct {
reflect.Value
Info reflect.Method
}
func newMethod(receiver reflect.Value, i int) *methodType {
return &methodType{receiver.Method(i), receiver.Type().Method(i)}
}
func (method *methodType) PC() uintptr {
return method.Info.Func.Pointer()
}
func (method *methodType) suiteName() string {
t := method.Info.Type.In(0)
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return t.Name()
}
func (method *methodType) String() string {
return method.suiteName() + "." + method.Info.Name
}
func (method *methodType) matches(re *regexp.Regexp) bool {
return (re.MatchString(method.Info.Name) ||
re.MatchString(method.suiteName()) ||
re.MatchString(method.String()))
}
type C struct {
method *methodType
kind funcKind
testName string
_status funcStatus
logb *logger
logw io.Writer
done chan *C
reason string
mustFail bool
tempDir *tempDir
benchMem bool
startTime time.Time
timer
}
func (c *C) status() funcStatus {
return funcStatus(atomic.LoadUint32((*uint32)(&c._status)))
}
func (c *C) setStatus(s funcStatus) {
atomic.StoreUint32((*uint32)(&c._status), uint32(s))
}
func (c *C) stopNow() {
runtime.Goexit()
}
// logger is a concurrency-safe bytes.Buffer
type logger struct {
sync.Mutex
writer bytes.Buffer
}
func (l *logger) Write(buf []byte) (int, error) {
l.Lock()
defer l.Unlock()
return l.writer.Write(buf)
}
func (l *logger) WriteTo(w io.Writer) (int64, error) {
l.Lock()
defer l.Unlock()
return l.writer.WriteTo(w)
}
func (l *logger) String() string {
l.Lock()
defer l.Unlock()
return l.writer.String()
}
// -----------------------------------------------------------------------
// Handling of temporary files and directories.
type tempDir struct {
sync.Mutex
path string
counter int
}
func (td *tempDir) newPath() string {
td.Lock()
defer td.Unlock()
if td.path == "" {
var err error
for i := 0; i != 100; i++ {
path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int())
if err = os.Mkdir(path, 0700); err == nil {
td.path = path
break
}
}
if td.path == "" {
panic("Couldn't create temporary directory: " + err.Error())
}
}
result := filepath.Join(td.path, strconv.Itoa(td.counter))
td.counter += 1
return result
}
func (td *tempDir) removeAll() {
td.Lock()
defer td.Unlock()
if td.path != "" {
err := os.RemoveAll(td.path)
if err != nil {
fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error())
}
}
}
// Create a new temporary directory which is automatically removed after
// the suite finishes running.
func (c *C) MkDir() string {
path := c.tempDir.newPath()
if err := os.Mkdir(path, 0700); err != nil {
panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error()))
}
return path
}
// -----------------------------------------------------------------------
// Low-level logging functions.
func (c *C) log(args ...interface{}) {
c.writeLog([]byte(fmt.Sprint(args...) + "\n"))
}
func (c *C) logf(format string, args ...interface{}) {
c.writeLog([]byte(fmt.Sprintf(format+"\n", args...)))
}
func (c *C) logNewLine() {
c.writeLog([]byte{'\n'})
}
func (c *C) writeLog(buf []byte) {
c.logb.Write(buf)
if c.logw != nil {
c.logw.Write(buf)
}
}
func hasStringOrError(x interface{}) (ok bool) {
_, ok = x.(fmt.Stringer)
if ok {
return
}
_, ok = x.(error)
return
}
func (c *C) logValue(label string, value interface{}) {
if label == "" {
if hasStringOrError(value) {
c.logf("... %#v (%q)", value, value)
} else {
c.logf("... %#v", value)
}
} else if value == nil {
c.logf("... %s = nil", label)
} else {
if hasStringOrError(value) {
fv := fmt.Sprintf("%#v", value)
qv := fmt.Sprintf("%q", value)
if fv != qv {
c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv)
return
}
}
if s, ok := value.(string); ok && isMultiLine(s) {
c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value))
c.logMultiLine(s)
} else {
c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value)
}
}
}
func (c *C) logMultiLine(s string) {
b := make([]byte, 0, len(s)*2)
i := 0
n := len(s)
for i < n {
j := i + 1
for j < n && s[j-1] != '\n' {
j++
}
b = append(b, "... "...)
b = strconv.AppendQuote(b, s[i:j])
if j < n {
b = append(b, " +"...)
}
b = append(b, '\n')
i = j
}
c.writeLog(b)
}
func isMultiLine(s string) bool {
for i := 0; i+1 < len(s); i++ {
if s[i] == '\n' {
return true
}
}
return false
}
func (c *C) logString(issue string) {
c.log("... ", issue)
}
func (c *C) logCaller(skip int) {
// This is a bit heavier than it ought to be.
skip += 1 // Our own frame.
pc, callerFile, callerLine, ok := runtime.Caller(skip)
if !ok {
return
}
var testFile string
var testLine int
testFunc := runtime.FuncForPC(c.method.PC())
if runtime.FuncForPC(pc) != testFunc {
for {
skip += 1
if pc, file, line, ok := runtime.Caller(skip); ok {
// Note that the test line may be different on
// distinct calls for the same test. Showing
// the "internal" line is helpful when debugging.
if runtime.FuncForPC(pc) == testFunc {
testFile, testLine = file, line
break
}
} else {
break
}
}
}
if testFile != "" && (testFile != callerFile || testLine != callerLine) {
c.logCode(testFile, testLine)
}
c.logCode(callerFile, callerLine)
}
func (c *C) logCode(path string, line int) {
c.logf("%s:%d:", nicePath(path), line)
code, err := printLine(path, line)
if code == "" {
code = "..." // XXX Open the file and take the raw line.
if err != nil {
code += err.Error()
}
}
c.log(indent(code, " "))
}
var valueGo = filepath.Join("reflect", "value.go")
var asmGo = filepath.Join("runtime", "asm_")
func (c *C) logPanic(skip int, value interface{}) {
skip++ // Our own frame.
initialSkip := skip
for ; ; skip++ {
if pc, file, line, ok := runtime.Caller(skip); ok {
if skip == initialSkip {
c.logf("... Panic: %s (PC=0x%X)\n", value, pc)
}
name := niceFuncName(pc)
path := nicePath(file)
if strings.Contains(path, "/gopkg.in/check.v") {
continue
}
if name == "Value.call" && strings.HasSuffix(path, valueGo) {
continue
}
if (name == "call16" || name == "call32") && strings.Contains(path, asmGo) {
continue
}
c.logf("%s:%d\n in %s", nicePath(file), line, name)
} else {
break
}
}
}
func (c *C) logSoftPanic(issue string) {
c.log("... Panic: ", issue)
}
func (c *C) logArgPanic(method *methodType, expectedType string) {
c.logf("... Panic: %s argument should be %s",
niceFuncName(method.PC()), expectedType)
}
// -----------------------------------------------------------------------
// Some simple formatting helpers.
var initWD, initWDErr = os.Getwd()
func init() {
if initWDErr == nil {
initWD = strings.Replace(initWD, "\\", "/", -1) + "/"
}
}
func nicePath(path string) string {
if initWDErr == nil {
if strings.HasPrefix(path, initWD) {
return path[len(initWD):]
}
}
return path
}
func niceFuncPath(pc uintptr) string {
function := runtime.FuncForPC(pc)
if function != nil {
filename, line := function.FileLine(pc)
return fmt.Sprintf("%s:%d", nicePath(filename), line)
}
return "<unknown path>"
}
func niceFuncName(pc uintptr) string {
function := runtime.FuncForPC(pc)
if function != nil {
name := path.Base(function.Name())
if i := strings.Index(name, "."); i > 0 {
name = name[i+1:]
}
if strings.HasPrefix(name, "(*") {
if i := strings.Index(name, ")"); i > 0 {
name = name[2:i] + name[i+1:]
}
}
if i := strings.LastIndex(name, ".*"); i != -1 {
name = name[:i] + "." + name[i+2:]
}
if i := strings.LastIndex(name, "·"); i != -1 {
name = name[:i] + "." + name[i+2:]
}
return name
}
return "<unknown function>"
}
// -----------------------------------------------------------------------
// Result tracker to aggregate call results.
type Result struct {
Succeeded int
Failed int
Skipped int
Panicked int
FixturePanicked int
ExpectedFailures int
Missed int // Not even tried to run, related to a panic in the fixture.
RunError error // Houston, we've got a problem.
WorkDir string // If KeepWorkDir is true
}
type resultTracker struct {
result Result
_lastWasProblem bool
_waiting int
_missed int
_expectChan chan *C
_doneChan chan *C
_stopChan chan bool
}
func newResultTracker() *resultTracker {
return &resultTracker{_expectChan: make(chan *C), // Synchronous
_doneChan: make(chan *C, 32), // Asynchronous
_stopChan: make(chan bool)} // Synchronous
}
func (tracker *resultTracker) start() {
go tracker._loopRoutine()
}
func (tracker *resultTracker) waitAndStop() {
<-tracker._stopChan
}
func (tracker *resultTracker) expectCall(c *C) {
tracker._expectChan <- c
}
func (tracker *resultTracker) callDone(c *C) {
tracker._doneChan <- c
}
func (tracker *resultTracker) _loopRoutine() {
for {
var c *C
if tracker._waiting > 0 {
// Calls still running. Can't stop.
select {
// XXX Reindent this (not now to make diff clear)
case c = <-tracker._expectChan:
tracker._waiting += 1
case c = <-tracker._doneChan:
tracker._waiting -= 1
switch c.status() {
case succeededSt:
if c.kind == testKd {
if c.mustFail {
tracker.result.ExpectedFailures++
} else {
tracker.result.Succeeded++
}
}
case failedSt:
tracker.result.Failed++
case panickedSt:
if c.kind == fixtureKd {
tracker.result.FixturePanicked++
} else {
tracker.result.Panicked++
}
case fixturePanickedSt:
// Track it as missed, since the panic
// was on the fixture, not on the test.
tracker.result.Missed++
case missedSt:
tracker.result.Missed++
case skippedSt:
if c.kind == testKd {
tracker.result.Skipped++
}
}
}
} else {
// No calls. Can stop, but no done calls here.
select {
case tracker._stopChan <- true:
return
case c = <-tracker._expectChan:
tracker._waiting += 1
case c = <-tracker._doneChan:
panic("Tracker got an unexpected done call.")
}
}
}
}
// -----------------------------------------------------------------------
// The underlying suite runner.
type suiteRunner struct {
suite interface{}
setUpSuite, tearDownSuite *methodType
setUpTest, tearDownTest *methodType
onTimeout *methodType
tests []*methodType
tracker *resultTracker
tempDir *tempDir
keepDir bool
output *outputWriter
reportedProblemLast bool
benchTime time.Duration
benchMem bool
checkTimeout time.Duration
}
type RunConf struct {
Output io.Writer
Stream bool
Verbose bool
Filter string
Benchmark bool
BenchmarkTime time.Duration // Defaults to 1 second
BenchmarkMem bool
KeepWorkDir bool
CheckTimeout time.Duration
}
// Create a new suiteRunner able to run all methods in the given suite.
func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
var conf RunConf
if runConf != nil {
conf = *runConf
}
if conf.Output == nil {
conf.Output = os.Stdout
}
if conf.Benchmark {
conf.Verbose = true
}
suiteType := reflect.TypeOf(suite)
suiteNumMethods := suiteType.NumMethod()
suiteValue := reflect.ValueOf(suite)
runner := &suiteRunner{
suite: suite,
output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose),
tracker: newResultTracker(),
benchTime: conf.BenchmarkTime,
benchMem: conf.BenchmarkMem,
tempDir: &tempDir{},
keepDir: conf.KeepWorkDir,
tests: make([]*methodType, 0, suiteNumMethods),
checkTimeout: conf.CheckTimeout,
}
if runner.benchTime == 0 {
runner.benchTime = 1 * time.Second
}
var filterRegexp *regexp.Regexp
if conf.Filter != "" {
if regexp, err := regexp.Compile(conf.Filter); err != nil {
msg := "Bad filter expression: " + err.Error()
runner.tracker.result.RunError = errors.New(msg)
return runner
} else {
filterRegexp = regexp
}
}
for i := 0; i != suiteNumMethods; i++ {
method := newMethod(suiteValue, i)
switch method.Info.Name {
case "SetUpSuite":
runner.setUpSuite = method
case "TearDownSuite":
runner.tearDownSuite = method
case "SetUpTest":
runner.setUpTest = method
case "TearDownTest":
runner.tearDownTest = method
case "OnTimeout":
runner.onTimeout = method
default:
prefix := "Test"
if conf.Benchmark {
prefix = "Benchmark"
}
if !strings.HasPrefix(method.Info.Name, prefix) {
continue
}
if filterRegexp == nil || method.matches(filterRegexp) {
runner.tests = append(runner.tests, method)
}
}
}
return runner
}
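// Illustrative sketch (not part of the original file): a suite exercising the
// fixture hooks recognized by the switch above. The hook names are the ones
// matched there (SetUpSuite, SetUpTest, TearDownTest, TearDownSuite); the
// suite type and test body are hypothetical.
//
//     type DBSuite struct {
//         workDir string
//     }
//
//     func (s *DBSuite) SetUpSuite(c *check.C)    { s.workDir = "/tmp/dbsuite" } // once per suite
//     func (s *DBSuite) SetUpTest(c *check.C)     { /* per-test setup */ }
//     func (s *DBSuite) TearDownTest(c *check.C)  { /* per-test cleanup */ }
//     func (s *DBSuite) TearDownSuite(c *check.C) { /* once, after all tests */ }
//
//     func (s *DBSuite) TestPing(c *check.C) { c.Check(1+1, check.Equals, 2) }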
// Run all methods in the given suite.
func (runner *suiteRunner) run() *Result {
if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {
runner.tracker.start()
if runner.checkFixtureArgs() {
c := runner.runFixture(runner.setUpSuite, "", nil)
if c == nil || c.status() == succeededSt {
for i := 0; i != len(runner.tests); i++ {
c := runner.runTest(runner.tests[i])
if c.status() == fixturePanickedSt {
runner.skipTests(missedSt, runner.tests[i+1:])
break
}
}
} else if c != nil && c.status() == skippedSt {
runner.skipTests(skippedSt, runner.tests)
} else {
runner.skipTests(missedSt, runner.tests)
}
runner.runFixture(runner.tearDownSuite, "", nil)
} else {
runner.skipTests(missedSt, runner.tests)
}
runner.tracker.waitAndStop()
if runner.keepDir {
runner.tracker.result.WorkDir = runner.tempDir.path
} else {
runner.tempDir.removeAll()
}
}
return &runner.tracker.result
}
// Create a call object with the given suite method, and fork a
// goroutine with the provided dispatcher for running it.
func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
var logw io.Writer
if runner.output.Stream {
logw = runner.output
}
if logb == nil {
logb = new(logger)
}
c := &C{
method: method,
kind: kind,
testName: testName,
logb: logb,
logw: logw,
tempDir: runner.tempDir,
done: make(chan *C, 1),
timer: timer{benchTime: runner.benchTime},
startTime: time.Now(),
benchMem: runner.benchMem,
}
runner.tracker.expectCall(c)
go (func() {
runner.reportCallStarted(c)
defer runner.callDone(c)
dispatcher(c)
})()
return c
}
type timeoutErr struct {
method *methodType
t time.Duration
}
func (e timeoutErr) Error() string {
return fmt.Sprintf("%s test timed out after %v", e.method.String(), e.t)
}
func isTimeout(e error) bool {
if e == nil {
return false
}
_, ok := e.(timeoutErr)
return ok
}
// Same as forkCall(), but wait for call to finish before returning.
func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
var timeout <-chan time.Time
if runner.checkTimeout != 0 {
timeout = time.After(runner.checkTimeout)
}
c := runner.forkCall(method, kind, testName, logb, dispatcher)
select {
case <-c.done:
case <-timeout:
if runner.onTimeout != nil {
// run the OnTimeout callback, allowing the suite to collect any debug information it can
// `runFixture` is synchronous, so run it in a separate goroutine with a timeout
cChan := make(chan *C)
go func() {
cChan <- runner.runFixture(runner.onTimeout, c.testName, c.logb)
}()
select {
case <-cChan:
case <-time.After(runner.checkTimeout):
}
}
panic(timeoutErr{method, runner.checkTimeout})
}
return c
}
// Handle a finished call. If there were any panics, update the call status
// accordingly. Then, mark the call as done and report to the tracker.
func (runner *suiteRunner) callDone(c *C) {
value := recover()
if value != nil {
switch v := value.(type) {
case *fixturePanic:
if v.status == skippedSt {
c.setStatus(skippedSt)
} else {
c.logSoftPanic("Fixture has panicked (see related PANIC)")
c.setStatus(fixturePanickedSt)
}
default:
c.logPanic(1, value)
c.setStatus(panickedSt)
}
}
if c.mustFail {
switch c.status() {
case failedSt:
c.setStatus(succeededSt)
case succeededSt:
c.setStatus(failedSt)
c.logString("Error: Test succeeded, but was expected to fail")
c.logString("Reason: " + c.reason)
}
}
runner.reportCallDone(c)
c.done <- c
}
// Runs a fixture call synchronously. The fixture will still be run in a
// goroutine like all suite methods, but this method will not return
// until the fixture goroutine has finished, because fixtures must run
// in a deterministic order.
func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C {
if method != nil {
c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) {
c.ResetTimer()
c.StartTimer()
defer c.StopTimer()
c.method.Call([]reflect.Value{reflect.ValueOf(c)})
})
return c
}
return nil
}
// Run the fixture method with runFixture(), but panic with a fixturePanic{}
// in case the fixture method panics. This makes it easier to track the
// fixture panic together with other call panics within forkTest().
func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C {
if skipped != nil && *skipped {
return nil
}
c := runner.runFixture(method, testName, logb)
if c != nil && c.status() != succeededSt {
if skipped != nil {
*skipped = c.status() == skippedSt
}
panic(&fixturePanic{c.status(), method})
}
return c
}
type fixturePanic struct {
status funcStatus
method *methodType
}
// Run the suite test method, together with the test-specific fixture,
// asynchronously.
func (runner *suiteRunner) forkTest(method *methodType) *C {
testName := method.String()
return runner.forkCall(method, testKd, testName, nil, func(c *C) {
var skipped bool
defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped)
defer c.StopTimer()
benchN := 1
for {
runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped)
mt := c.method.Type()
if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) {
// Rather than a plain panic, provide a more helpful message when
// the argument type is incorrect.
c.setStatus(panickedSt)
c.logArgPanic(c.method, "*check.C")
return
}
if strings.HasPrefix(c.method.Info.Name, "Test") {
c.ResetTimer()
c.StartTimer()
c.method.Call([]reflect.Value{reflect.ValueOf(c)})
return
}
if !strings.HasPrefix(c.method.Info.Name, "Benchmark") {
panic("unexpected method prefix: " + c.method.Info.Name)
}
runtime.GC()
c.N = benchN
c.ResetTimer()
c.StartTimer()
c.method.Call([]reflect.Value{reflect.ValueOf(c)})
c.StopTimer()
if c.status() != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 {
return
}
perOpN := int(1e9)
if c.nsPerOp() != 0 {
perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp())
}
// Logic taken from the stock testing package:
// - Run more iterations than we think we'll need for a second (1.5x).
// - Don't grow too fast in case we had timing errors previously.
// - Be sure to run at least one more than last time.
benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1)
benchN = roundUp(benchN)
skipped = true // Don't run the deferred one if this panics.
runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil)
skipped = false
}
})
}
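// Illustrative sketch (not part of the original file): a benchmark method is
// selected when running with -check.b and must loop c.N times, mirroring the
// stock testing package. parseInput is a hypothetical function under test.
//
//     func (s *MySuite) BenchmarkParse(c *check.C) {
//         for i := 0; i < c.N; i++ {
//             parseInput("some fixed input")
//         }
//     }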
// Same as forkTest(), but wait for the test to finish before returning.
func (runner *suiteRunner) runTest(method *methodType) *C {
var timeout <-chan time.Time
if runner.checkTimeout != 0 {
timeout = time.After(runner.checkTimeout)
}
c := runner.forkTest(method)
select {
case <-c.done:
case <-timeout:
if runner.onTimeout != nil {
// run the OnTimeout callback, allowing the suite to collect any debug information it can
// `runFixture` is synchronous, so run it in a separate goroutine with a timeout
cChan := make(chan *C)
go func() {
cChan <- runner.runFixture(runner.onTimeout, c.testName, c.logb)
}()
select {
case <-cChan:
case <-time.After(runner.checkTimeout):
}
}
panic(timeoutErr{method, runner.checkTimeout})
}
return c
}
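// Illustrative sketch (not part of the original file): a suite may declare an
// OnTimeout hook to capture debug state when -check.timeout fires; it receives
// a *check.C like any other fixture and runs before the timeout panic above.
//
//     func (s *MySuite) OnTimeout(c *check.C) {
//         buf := make([]byte, 1<<20)
//         n := runtime.Stack(buf, true) // dump all goroutines
//         c.Logf("timed out in %s; goroutines:\n%s", c.TestName(), buf[:n])
//     }
//
// Run with: go test -check.timeout=2m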
// Helper to mark tests as skipped or missed. A bit heavy for what
// it does, but it enables homogeneous handling of tracking, including
// nice verbose output.
func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) {
for _, method := range methods {
runner.runFunc(method, testKd, "", nil, func(c *C) {
c.setStatus(status)
})
}
}
// Verify if the fixture arguments are *check.C. In case of errors,
// log the error as a panic in the fixture method call, and return false.
func (runner *suiteRunner) checkFixtureArgs() bool {
succeeded := true
argType := reflect.TypeOf(&C{})
for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest, runner.onTimeout} {
if method != nil {
mt := method.Type()
if mt.NumIn() != 1 || mt.In(0) != argType {
succeeded = false
runner.runFunc(method, fixtureKd, "", nil, func(c *C) {
c.logArgPanic(method, "*check.C")
c.setStatus(panickedSt)
})
}
}
}
return succeeded
}
func (runner *suiteRunner) reportCallStarted(c *C) {
runner.output.WriteCallStarted("START", c)
}
func (runner *suiteRunner) reportCallDone(c *C) {
runner.tracker.callDone(c)
switch c.status() {
case succeededSt:
if c.mustFail {
runner.output.WriteCallSuccess("FAIL EXPECTED", c)
} else {
runner.output.WriteCallSuccess("PASS", c)
}
case skippedSt:
runner.output.WriteCallSuccess("SKIP", c)
case failedSt:
runner.output.WriteCallProblem("FAIL", c)
case panickedSt:
runner.output.WriteCallProblem("PANIC", c)
case fixturePanickedSt:
// That's a testKd call reporting that its fixture
// has panicked. The fixture call which caused the
// panic itself was tracked above. We'll report to
// aid debugging.
runner.output.WriteCallProblem("PANIC", c)
case missedSt:
runner.output.WriteCallSuccess("MISS", c)
}
}


@ -1,458 +0,0 @@
package check
import (
"fmt"
"reflect"
"regexp"
)
// -----------------------------------------------------------------------
// CommentInterface and Commentf helper, to attach extra information to checks.
type comment struct {
format string
args []interface{}
}
// Commentf returns an informational value to use with Assert or Check calls.
// If the checker test fails, the provided arguments will be passed to
// fmt.Sprintf, and will be presented next to the logged failure.
//
// For example:
//
// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
//
// Note that if the comment is constant, a better option is to
// simply use a normal comment right above or next to the line, as
// it will also get printed with any errors:
//
// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
//
func Commentf(format string, args ...interface{}) CommentInterface {
return &comment{format, args}
}
// CommentInterface must be implemented by types that attach extra
// information to failed checks. See the Commentf function for details.
type CommentInterface interface {
CheckCommentString() string
}
func (c *comment) CheckCommentString() string {
return fmt.Sprintf(c.format, c.args...)
}
// -----------------------------------------------------------------------
// The Checker interface.
// The Checker interface must be provided by checkers used with
// the Assert and Check verification methods.
type Checker interface {
Info() *CheckerInfo
Check(params []interface{}, names []string) (result bool, error string)
}
// See the Checker interface.
type CheckerInfo struct {
Name string
Params []string
}
func (info *CheckerInfo) Info() *CheckerInfo {
return info
}
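// Illustrative sketch (not part of the original file): a user-defined checker
// only needs to embed *CheckerInfo (which provides Info) and implement Check.
// The params slice holds the obtained value followed by any extra arguments.
//
//     type isPositiveChecker struct {
//         *CheckerInfo
//     }
//
//     var IsPositive Checker = &isPositiveChecker{
//         &CheckerInfo{Name: "IsPositive", Params: []string{"obtained"}},
//     }
//
//     func (checker *isPositiveChecker) Check(params []interface{}, names []string) (bool, string) {
//         n, ok := params[0].(int)
//         if !ok {
//             return false, "obtained value must be an int"
//         }
//         return n > 0, ""
//     }
//
//     // c.Assert(count, IsPositive)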
// -----------------------------------------------------------------------
// Not checker logic inverter.
// The Not checker inverts the logic of the provided checker. The
// resulting checker will succeed where the original one failed, and
// vice-versa.
//
// For example:
//
// c.Assert(a, Not(Equals), b)
//
func Not(checker Checker) Checker {
return &notChecker{checker}
}
type notChecker struct {
sub Checker
}
func (checker *notChecker) Info() *CheckerInfo {
info := *checker.sub.Info()
info.Name = "Not(" + info.Name + ")"
return &info
}
func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
result, error = checker.sub.Check(params, names)
result = !result
return
}
// -----------------------------------------------------------------------
// IsNil checker.
type isNilChecker struct {
*CheckerInfo
}
// The IsNil checker tests whether the obtained value is nil.
//
// For example:
//
// c.Assert(err, IsNil)
//
var IsNil Checker = &isNilChecker{
&CheckerInfo{Name: "IsNil", Params: []string{"value"}},
}
func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
return isNil(params[0]), ""
}
func isNil(obtained interface{}) (result bool) {
if obtained == nil {
result = true
} else {
switch v := reflect.ValueOf(obtained); v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
}
}
return
}
// -----------------------------------------------------------------------
// NotNil checker. Alias for Not(IsNil), since it's so common.
type notNilChecker struct {
*CheckerInfo
}
// The NotNil checker verifies that the obtained value is not nil.
//
// For example:
//
// c.Assert(iface, NotNil)
//
// This is an alias for Not(IsNil), made available since it's a
// fairly common check.
//
var NotNil Checker = &notNilChecker{
&CheckerInfo{Name: "NotNil", Params: []string{"value"}},
}
func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
return !isNil(params[0]), ""
}
// -----------------------------------------------------------------------
// Equals checker.
type equalsChecker struct {
*CheckerInfo
}
// The Equals checker verifies that the obtained value is equal to
// the expected value, according to usual Go semantics for ==.
//
// For example:
//
// c.Assert(value, Equals, 42)
//
var Equals Checker = &equalsChecker{
&CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
}
func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) {
defer func() {
if v := recover(); v != nil {
result = false
error = fmt.Sprint(v)
}
}()
return params[0] == params[1], ""
}
// -----------------------------------------------------------------------
// DeepEquals checker.
type deepEqualsChecker struct {
*CheckerInfo
}
// The DeepEquals checker verifies that the obtained value is deep-equal to
// the expected value. The check will work correctly even when facing
// slices, interfaces, and values of different types (which always fail
// the test).
//
// For example:
//
// c.Assert(value, DeepEquals, 42)
// c.Assert(array, DeepEquals, []string{"hi", "there"})
//
var DeepEquals Checker = &deepEqualsChecker{
&CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}},
}
func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
return reflect.DeepEqual(params[0], params[1]), ""
}
// -----------------------------------------------------------------------
// HasLen checker.
type hasLenChecker struct {
*CheckerInfo
}
// The HasLen checker verifies that the obtained value has the
// provided length. In many cases this is superior to using Equals
// in conjunction with the len function because, if the check
// fails, the value itself will be printed instead of just its length,
// providing more detail for figuring out the problem.
//
// For example:
//
// c.Assert(list, HasLen, 5)
//
var HasLen Checker = &hasLenChecker{
&CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
}
func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
n, ok := params[1].(int)
if !ok {
return false, "n must be an int"
}
value := reflect.ValueOf(params[0])
switch value.Kind() {
case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
default:
return false, "obtained value type has no length"
}
return value.Len() == n, ""
}
// -----------------------------------------------------------------------
// ErrorMatches checker.
type errorMatchesChecker struct {
*CheckerInfo
}
// The ErrorMatches checker verifies that the error value
// is non-nil and matches the regular expression provided.
//
// For example:
//
// c.Assert(err, ErrorMatches, "perm.*denied")
//
var ErrorMatches Checker = errorMatchesChecker{
&CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
}
func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
if params[0] == nil {
return false, "Error value is nil"
}
err, ok := params[0].(error)
if !ok {
return false, "Value is not an error"
}
params[0] = err.Error()
names[0] = "error"
return matches(params[0], params[1])
}
// -----------------------------------------------------------------------
// Matches checker.
type matchesChecker struct {
*CheckerInfo
}
// The Matches checker verifies that the string provided as the obtained
// value (or the string resulting from obtained.String()) matches the
// regular expression provided.
//
// For example:
//
// c.Assert(err, Matches, "perm.*denied")
//
var Matches Checker = &matchesChecker{
&CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
}
func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
return matches(params[0], params[1])
}
func matches(value, regex interface{}) (result bool, error string) {
reStr, ok := regex.(string)
if !ok {
return false, "Regex must be a string"
}
valueStr, valueIsStr := value.(string)
if !valueIsStr {
if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
valueStr, valueIsStr = valueWithStr.String(), true
}
}
if valueIsStr {
matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
if err != nil {
return false, "Can't compile regex: " + err.Error()
}
return matches, ""
}
return false, "Obtained value is not a string and has no .String()"
}
// -----------------------------------------------------------------------
// Panics checker.
type panicsChecker struct {
*CheckerInfo
}
// The Panics checker verifies that calling the provided zero-argument
// function will cause a panic which is deep-equal to the provided value.
//
// For example:
//
// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}).
//
//
var Panics Checker = &panicsChecker{
&CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}},
}
func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) {
f := reflect.ValueOf(params[0])
if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
return false, "Function must take zero arguments"
}
defer func() {
// If the function has not panicked, then don't do the check.
if error != "" {
return
}
params[0] = recover()
names[0] = "panic"
result = reflect.DeepEqual(params[0], params[1])
}()
f.Call(nil)
return false, "Function has not panicked"
}
type panicMatchesChecker struct {
*CheckerInfo
}
// The PanicMatches checker verifies that calling the provided zero-argument
// function will cause a panic with an error value matching
// the regular expression provided.
//
// For example:
//
// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`).
//
//
var PanicMatches Checker = &panicMatchesChecker{
&CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}},
}
func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) {
f := reflect.ValueOf(params[0])
if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
return false, "Function must take zero arguments"
}
defer func() {
// If the function has not panicked, then don't do the check.
if errmsg != "" {
return
}
obtained := recover()
names[0] = "panic"
if e, ok := obtained.(error); ok {
params[0] = e.Error()
} else if _, ok := obtained.(string); ok {
params[0] = obtained
} else {
errmsg = "Panic value is not a string or an error"
return
}
result, errmsg = matches(params[0], params[1])
}()
f.Call(nil)
return false, "Function has not panicked"
}
// -----------------------------------------------------------------------
// FitsTypeOf checker.
type fitsTypeChecker struct {
*CheckerInfo
}
// The FitsTypeOf checker verifies that the obtained value is
// assignable to a variable with the same type as the provided
// sample value.
//
// For example:
//
// c.Assert(value, FitsTypeOf, int64(0))
// c.Assert(value, FitsTypeOf, os.Error(nil))
//
var FitsTypeOf Checker = &fitsTypeChecker{
&CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}},
}
func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) {
obtained := reflect.ValueOf(params[0])
sample := reflect.ValueOf(params[1])
if !obtained.IsValid() {
return false, ""
}
if !sample.IsValid() {
return false, "Invalid sample value"
}
return obtained.Type().AssignableTo(sample.Type()), ""
}
// -----------------------------------------------------------------------
// Implements checker.
type implementsChecker struct {
*CheckerInfo
}
// The Implements checker verifies that the obtained value
// implements the interface specified via a pointer to an interface
// variable.
//
// For example:
//
// var e os.Error
// c.Assert(err, Implements, &e)
//
var Implements Checker = &implementsChecker{
&CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}},
}
func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) {
obtained := reflect.ValueOf(params[0])
ifaceptr := reflect.ValueOf(params[1])
if !obtained.IsValid() {
return false, ""
}
if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface {
return false, "ifaceptr should be a pointer to an interface variable"
}
return obtained.Type().Implements(ifaceptr.Elem().Type()), ""
}


@ -1,231 +0,0 @@
package check
import (
"fmt"
"strings"
"time"
)
// TestName returns the current test name in the form "SuiteName.TestName"
func (c *C) TestName() string {
return c.testName
}
// -----------------------------------------------------------------------
// Basic succeeding/failing logic.
// Failed returns whether the currently running test has already failed.
func (c *C) Failed() bool {
return c.status() == failedSt
}
// Fail marks the currently running test as failed.
//
// Something ought to have been previously logged so the developer can tell
// what went wrong. The higher level helper functions will fail the test
// and do the logging properly.
func (c *C) Fail() {
c.setStatus(failedSt)
}
// FailNow marks the currently running test as failed and stops running it.
// Something ought to have been previously logged so the developer can tell
// what went wrong. The higher level helper functions will fail the test
// and do the logging properly.
func (c *C) FailNow() {
c.Fail()
c.stopNow()
}
// Succeed marks the currently running test as succeeded, undoing any
// previous failures.
func (c *C) Succeed() {
c.setStatus(succeededSt)
}
// SucceedNow marks the currently running test as succeeded, undoing any
// previous failures, and stops running the test.
func (c *C) SucceedNow() {
c.Succeed()
c.stopNow()
}
// ExpectFailure informs that the running test is knowingly broken for
// the provided reason. If the test does not fail, an error will be reported
// to raise attention to this fact. This method is useful to temporarily
// disable tests which cover well known problems until a better time to
// fix the problem is found, without forgetting about the fact that a
// failure still exists.
func (c *C) ExpectFailure(reason string) {
if reason == "" {
panic("Missing reason why the test is expected to fail")
}
c.mustFail = true
c.reason = reason
}
// Skip skips the running test for the provided reason. If run from within
// SetUpTest, the individual test being set up will be skipped, and if run
// from within SetUpSuite, the whole suite is skipped.
func (c *C) Skip(reason string) {
if reason == "" {
panic("Missing reason why the test is being skipped")
}
c.reason = reason
c.setStatus(skippedSt)
c.stopNow()
}
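// Illustrative sketch (not part of the original file): typical uses of
// ExpectFailure and Skip. parseEmpty and RUN_SLOW are hypothetical.
//
//     func (s *MySuite) TestKnownBug(c *check.C) {
//         c.ExpectFailure("parser mishandles empty input; fix pending")
//         c.Assert(parseEmpty(), check.IsNil)
//     }
//
//     func (s *MySuite) SetUpTest(c *check.C) {
//         if os.Getenv("RUN_SLOW") == "" {
//             c.Skip("slow tests disabled; set RUN_SLOW=1 to enable")
//         }
//     }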
// -----------------------------------------------------------------------
// Basic logging.
// GetTestLog returns the current test error output.
func (c *C) GetTestLog() string {
return c.logb.String()
}
// Log logs some information into the test error output.
// The provided arguments are assembled together into a string with fmt.Sprint.
func (c *C) Log(args ...interface{}) {
c.log(args...)
}
// Logf logs some information into the test error output.
// The provided arguments are assembled together into a string with fmt.Sprintf.
func (c *C) Logf(format string, args ...interface{}) {
c.logf(format, args...)
}
// Output enables *C to be used as a logger in functions that require only
// the minimum interface of *log.Logger.
func (c *C) Output(calldepth int, s string) error {
d := time.Now().Sub(c.startTime)
msec := d / time.Millisecond
sec := d / time.Second
min := d / time.Minute
c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s)
return nil
}
// Error logs an error into the test error output and marks the test as failed.
// The provided arguments are assembled together into a string with fmt.Sprint.
func (c *C) Error(args ...interface{}) {
c.logCaller(1)
c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
c.logNewLine()
c.Fail()
}
// Errorf logs an error into the test error output and marks the test as failed.
// The provided arguments are assembled together into a string with fmt.Sprintf.
func (c *C) Errorf(format string, args ...interface{}) {
c.logCaller(1)
c.logString(fmt.Sprintf("Error: "+format, args...))
c.logNewLine()
c.Fail()
}
// Fatal logs an error into the test error output, marks the test as failed, and
// stops the test execution. The provided arguments are assembled together into
// a string with fmt.Sprint.
func (c *C) Fatal(args ...interface{}) {
c.logCaller(1)
c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
c.logNewLine()
c.FailNow()
}
// Fatalf logs an error into the test error output, marks the test as failed, and
// stops the test execution. The provided arguments are assembled together into
// a string with fmt.Sprintf.
func (c *C) Fatalf(format string, args ...interface{}) {
c.logCaller(1)
c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
c.logNewLine()
c.FailNow()
}
// -----------------------------------------------------------------------
// Generic checks and assertions based on checkers.
// Check verifies if the first value matches the expected value according
// to the provided checker. If they do not match, an error is logged, the
// test is marked as failed, and the test execution continues.
//
// Some checkers may not need the expected argument (e.g. IsNil).
//
// Extra arguments provided to the function are logged next to the reported
// problem when the matching fails.
func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool {
return c.internalCheck("Check", obtained, checker, args...)
}
// Assert ensures that the first value matches the expected value according
// to the provided checker. If they do not match, an error is logged, the
// test is marked as failed, and the test execution stops.
//
// Some checkers may not need the expected argument (e.g. IsNil).
//
// Extra arguments provided to the function are logged next to the reported
// problem when the matching fails.
func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) {
if !c.internalCheck("Assert", obtained, checker, args...) {
c.stopNow()
}
}
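// Illustrative sketch (not part of the original file): Check records a failure
// and keeps going, while Assert records it and stops the test immediately.
// loadConfig is a hypothetical helper.
//
//     func (s *MySuite) TestConfig(c *check.C) {
//         cfg, err := loadConfig()
//         c.Assert(err, check.IsNil) // abort here if loading failed
//         c.Check(cfg.Port, check.Equals, 8080, check.Commentf("unexpected port"))
//         c.Check(cfg.Host, check.Equals, "localhost")
//     }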
func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool {
if checker == nil {
c.logCaller(2)
c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName))
c.logString("Oops.. you've provided a nil checker!")
c.logNewLine()
c.Fail()
return false
}
// If the last argument is a bug info, extract it out.
var comment CommentInterface
if len(args) > 0 {
if c, ok := args[len(args)-1].(CommentInterface); ok {
comment = c
args = args[:len(args)-1]
}
}
params := append([]interface{}{obtained}, args...)
info := checker.Info()
if len(params) != len(info.Params) {
names := append([]string{info.Params[0], info.Name}, info.Params[1:]...)
c.logCaller(2)
c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", ")))
c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1))
c.logNewLine()
c.Fail()
return false
}
// Copy since it may be mutated by Check.
names := append([]string{}, info.Params...)
// Do the actual check.
result, error := checker.Check(params, names)
if !result || error != "" {
c.logCaller(2)
for i := 0; i != len(params); i++ {
c.logValue(names[i], params[i])
}
if comment != nil {
c.logString(comment.CheckCommentString())
}
if error != "" {
c.logString(error)
}
c.logNewLine()
c.Fail()
return false
}
return true
}


@ -1,168 +0,0 @@
package check
import (
"bytes"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"os"
)
func indent(s, with string) (r string) {
eol := true
for i := 0; i != len(s); i++ {
c := s[i]
switch {
case eol && c == '\n' || c == '\r':
case c == '\n' || c == '\r':
eol = true
case eol:
eol = false
s = s[:i] + with + s[i:]
i += len(with)
}
}
return s
}
func printLine(filename string, line int) (string, error) {
fset := token.NewFileSet()
file, err := os.Open(filename)
if err != nil {
return "", err
}
fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments)
if err != nil {
return "", err
}
config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}
lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config}
ast.Walk(lp, fnode)
result := lp.output.Bytes()
// Comments leave \n at the end.
n := len(result)
for n > 0 && result[n-1] == '\n' {
n--
}
return string(result[:n]), nil
}
type linePrinter struct {
config *printer.Config
fset *token.FileSet
fnode *ast.File
line int
output bytes.Buffer
stmt ast.Stmt
}
func (lp *linePrinter) emit() bool {
if lp.stmt != nil {
lp.trim(lp.stmt)
lp.printWithComments(lp.stmt)
lp.stmt = nil
return true
}
return false
}
func (lp *linePrinter) printWithComments(n ast.Node) {
nfirst := lp.fset.Position(n.Pos()).Line
nlast := lp.fset.Position(n.End()).Line
for _, g := range lp.fnode.Comments {
cfirst := lp.fset.Position(g.Pos()).Line
clast := lp.fset.Position(g.End()).Line
if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column {
for _, c := range g.List {
lp.output.WriteString(c.Text)
lp.output.WriteByte('\n')
}
}
if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash {
// The printer will not include the comment if it starts past
// the node itself. Trick it into printing by overlapping the
// slash with the end of the statement.
g.List[0].Slash = n.End() - 1
}
}
node := &printer.CommentedNode{n, lp.fnode.Comments}
lp.config.Fprint(&lp.output, lp.fset, node)
}
func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) {
if n == nil {
if lp.output.Len() == 0 {
lp.emit()
}
return nil
}
first := lp.fset.Position(n.Pos()).Line
last := lp.fset.Position(n.End()).Line
if first <= lp.line && last >= lp.line {
// Print the innermost statement containing the line.
if stmt, ok := n.(ast.Stmt); ok {
if _, ok := n.(*ast.BlockStmt); !ok {
lp.stmt = stmt
}
}
if first == lp.line && lp.emit() {
return nil
}
return lp
}
return nil
}
func (lp *linePrinter) trim(n ast.Node) bool {
stmt, ok := n.(ast.Stmt)
if !ok {
return true
}
line := lp.fset.Position(n.Pos()).Line
if line != lp.line {
return false
}
switch stmt := stmt.(type) {
case *ast.IfStmt:
stmt.Body = lp.trimBlock(stmt.Body)
case *ast.SwitchStmt:
stmt.Body = lp.trimBlock(stmt.Body)
case *ast.TypeSwitchStmt:
stmt.Body = lp.trimBlock(stmt.Body)
case *ast.CaseClause:
stmt.Body = lp.trimList(stmt.Body)
case *ast.CommClause:
stmt.Body = lp.trimList(stmt.Body)
case *ast.BlockStmt:
stmt.List = lp.trimList(stmt.List)
}
return true
}
func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt {
if !lp.trim(stmt) {
return lp.emptyBlock(stmt)
}
stmt.Rbrace = stmt.Lbrace
return stmt
}
func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt {
for i := 0; i != len(stmts); i++ {
if !lp.trim(stmts[i]) {
stmts[i] = lp.emptyStmt(stmts[i])
break
}
}
return stmts
}
func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt {
return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}}
}
func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt {
p := n.Pos()
return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p}
}


@ -1,88 +0,0 @@
package check
import (
"fmt"
"io"
"sync"
)
// -----------------------------------------------------------------------
// Output writer manages atomic output writing according to settings.
type outputWriter struct {
m sync.Mutex
writer io.Writer
wroteCallProblemLast bool
Stream bool
Verbose bool
}
func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
}
func (ow *outputWriter) Write(content []byte) (n int, err error) {
ow.m.Lock()
n, err = ow.writer.Write(content)
ow.m.Unlock()
return
}
func (ow *outputWriter) WriteCallStarted(label string, c *C) {
if ow.Stream {
header := renderCallHeader(label, c, "", "\n")
ow.m.Lock()
ow.writer.Write([]byte(header))
ow.m.Unlock()
}
}
func (ow *outputWriter) WriteCallProblem(label string, c *C) {
var prefix string
if !ow.Stream {
prefix = "\n-----------------------------------" +
"-----------------------------------\n"
}
header := renderCallHeader(label, c, prefix, "\n\n")
ow.m.Lock()
ow.wroteCallProblemLast = true
ow.writer.Write([]byte(header))
if !ow.Stream {
c.logb.WriteTo(ow.writer)
}
ow.m.Unlock()
}
func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
if ow.Stream || (ow.Verbose && c.kind == testKd) {
// TODO Use a buffer here.
var suffix string
if c.reason != "" {
suffix = " (" + c.reason + ")"
}
if c.status() == succeededSt {
suffix += "\t" + c.timerString()
}
suffix += "\n"
if ow.Stream {
suffix += "\n"
}
header := renderCallHeader(label, c, "", suffix)
ow.m.Lock()
// Resist temptation of using line as prefix above due to race.
if !ow.Stream && ow.wroteCallProblemLast {
header = "\n-----------------------------------" +
"-----------------------------------\n" +
header
}
ow.wroteCallProblemLast = false
ow.writer.Write([]byte(header))
ow.m.Unlock()
}
}
func renderCallHeader(label string, c *C, prefix, suffix string) string {
pc := c.method.PC()
return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
niceFuncName(pc), suffix)
}


@ -1,183 +0,0 @@
package check
import (
"bufio"
"flag"
"fmt"
"os"
"testing"
"time"
)
// -----------------------------------------------------------------------
// Test suite registry.
var allSuites []interface{}
// Suite registers the given value as a test suite to be run. Any methods
// starting with the Test prefix in the given value will be considered
// test methods.
func Suite(suite interface{}) interface{} {
allSuites = append(allSuites, suite)
return suite
}
// -----------------------------------------------------------------------
// Public running interface.
var (
oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run")
oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode")
oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)")
oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks")
oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark")
oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run")
oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory")
newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run")
newVerboseFlag = flag.Bool("check.v", false, "Verbose mode")
newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)")
newBenchFlag = flag.Bool("check.b", false, "Run benchmarks")
newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark")
newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks")
newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run")
newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory")
checkTimeout = flag.String("check.timeout", "", "Panic if test runs longer than specified duration")
)
// TestingT runs all test suites registered with the Suite function,
// printing results to stdout, and reporting any failures back to
// the "testing" package.
func TestingT(testingT *testing.T) {
benchTime := *newBenchTime
if benchTime == 1*time.Second {
benchTime = *oldBenchTime
}
conf := &RunConf{
Filter: *oldFilterFlag + *newFilterFlag,
Verbose: *oldVerboseFlag || *newVerboseFlag,
Stream: *oldStreamFlag || *newStreamFlag,
Benchmark: *oldBenchFlag || *newBenchFlag,
BenchmarkTime: benchTime,
BenchmarkMem: *newBenchMem,
KeepWorkDir: *oldWorkFlag || *newWorkFlag,
}
if *checkTimeout != "" {
timeout, err := time.ParseDuration(*checkTimeout)
if err != nil {
testingT.Fatalf("error parsing specified timeout flag: %v", err)
}
conf.CheckTimeout = timeout
}
if *oldListFlag || *newListFlag {
w := bufio.NewWriter(os.Stdout)
for _, name := range ListAll(conf) {
fmt.Fprintln(w, name)
}
w.Flush()
return
}
result := RunAll(conf)
println(result.String())
if !result.Passed() {
testingT.Fail()
}
}
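// Illustrative sketch (not part of the original file): the usual wiring in a
// consuming package's *_test.go file.
//
//     func Test(t *testing.T) { check.TestingT(t) }
//
//     type MySuite struct{}
//
//     var _ = check.Suite(&MySuite{})
//
//     func (s *MySuite) TestHello(c *check.C) {
//         c.Assert("hello", check.Equals, "hello")
//     }
//
// Flags such as -check.f, -check.v and -check.timeout are then picked up from
// the normal "go test" command line.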
// RunAll runs all test suites registered with the Suite function, using the
// provided run configuration.
func RunAll(runConf *RunConf) *Result {
result := Result{}
for _, suite := range allSuites {
result.Add(Run(suite, runConf))
}
return &result
}
// Run runs the provided test suite using the provided run configuration.
func Run(suite interface{}, runConf *RunConf) *Result {
runner := newSuiteRunner(suite, runConf)
return runner.run()
}
// ListAll returns the names of all the test functions registered with the
// Suite function that will be run with the provided run configuration.
func ListAll(runConf *RunConf) []string {
var names []string
for _, suite := range allSuites {
names = append(names, List(suite, runConf)...)
}
return names
}
// List returns the names of the test functions in the given
// suite that will be run with the provided run configuration.
func List(suite interface{}, runConf *RunConf) []string {
var names []string
runner := newSuiteRunner(suite, runConf)
for _, t := range runner.tests {
names = append(names, t.String())
}
return names
}
// -----------------------------------------------------------------------
// Result methods.
func (r *Result) Add(other *Result) {
r.Succeeded += other.Succeeded
r.Skipped += other.Skipped
r.Failed += other.Failed
r.Panicked += other.Panicked
r.FixturePanicked += other.FixturePanicked
r.ExpectedFailures += other.ExpectedFailures
r.Missed += other.Missed
if r.WorkDir != "" && other.WorkDir != "" {
r.WorkDir += ":" + other.WorkDir
} else if other.WorkDir != "" {
r.WorkDir = other.WorkDir
}
}
func (r *Result) Passed() bool {
return (r.Failed == 0 && r.Panicked == 0 &&
r.FixturePanicked == 0 && r.Missed == 0 &&
r.RunError == nil)
}
func (r *Result) String() string {
if r.RunError != nil {
return "ERROR: " + r.RunError.Error()
}
var value string
if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 &&
r.Missed == 0 {
value = "OK: "
} else {
value = "OOPS: "
}
value += fmt.Sprintf("%d passed", r.Succeeded)
if r.Skipped != 0 {
value += fmt.Sprintf(", %d skipped", r.Skipped)
}
if r.ExpectedFailures != 0 {
value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures)
}
if r.Failed != 0 {
value += fmt.Sprintf(", %d FAILED", r.Failed)
}
if r.Panicked != 0 {
value += fmt.Sprintf(", %d PANICKED", r.Panicked)
}
if r.FixturePanicked != 0 {
value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked)
}
if r.Missed != 0 {
value += fmt.Sprintf(", %d MISSED", r.Missed)
}
if r.WorkDir != "" {
value += "\nWORK=" + r.WorkDir
}
return value
}
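// Illustrative sketch (not part of the original file): driving the runner
// programmatically instead of through TestingT.
//
//     conf := &check.RunConf{Output: os.Stdout, Verbose: true}
//     res := check.RunAll(conf)
//     fmt.Println(res.String())
//     if !res.Passed() {
//         os.Exit(1)
//     }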

vendor/github.com/opencontainers/selinux/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/opencontainers/selinux/README.md generated vendored Normal file

@ -0,0 +1,7 @@
# selinux
[![GoDoc](https://godoc.org/github.com/opencontainers/selinux?status.svg)](https://godoc.org/github.com/opencontainers/selinux) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/selinux)](https://goreportcard.com/report/github.com/opencontainers/selinux) [![Build Status](https://travis-ci.org/opencontainers/selinux.svg?branch=master)](https://travis-ci.org/opencontainers/selinux)
Common SELinux package used across the container ecosystem.
Please see the [godoc](https://godoc.org/github.com/opencontainers/selinux) for more information.


@ -52,7 +52,7 @@ func ReserveLabel(label string) error {
return nil
}
func UnreserveLabel(label string) error {
func ReleaseLabel(label string) error {
return nil
}


@ -6,7 +6,7 @@ import (
"fmt"
"strings"
"github.com/opencontainers/runc/libcontainer/selinux"
"github.com/opencontainers/selinux/go-selinux"
)
// Valid Label Options
@ -25,10 +25,10 @@ var ErrIncompatibleLabel = fmt.Errorf("Bad SELinux option z and Z can not be use
// the labels. The labels returned will include a random MCS String, that is
// guaranteed to be unique.
func InitLabels(options []string) (string, string, error) {
if !selinux.SelinuxEnabled() {
if !selinux.GetEnabled() {
return "", "", nil
}
processLabel, mountLabel := selinux.GetLxcContexts()
processLabel, mountLabel := selinux.ContainerLabels()
if processLabel != "" {
pcon := selinux.NewContext(processLabel)
mcon := selinux.NewContext(mountLabel)
@ -55,8 +55,8 @@ func InitLabels(options []string) (string, string, error) {
return processLabel, mountLabel, nil
}
func GetROMountLabel() string {
return selinux.GetROFileLabel()
func ROMountLabel() string {
return selinux.ROFileLabel()
}
// DEPRECATED: The GenLabels function is only to be used during the transition to the official API.
@ -88,33 +88,33 @@ func SetProcessLabel(processLabel string) error {
if processLabel == "" {
return nil
}
return selinux.Setexeccon(processLabel)
return selinux.SetExecLabel(processLabel)
}
// GetProcessLabel returns the process label that the kernel will assign
// ProcessLabel returns the process label that the kernel will assign
// to the next program executed by the current process. If "" is returned
// this indicates that the default labeling will happen for the process.
func GetProcessLabel() (string, error) {
return selinux.Getexeccon()
func ProcessLabel() (string, error) {
return selinux.ExecLabel()
}
// GetFileLabel returns the label for specified path
func GetFileLabel(path string) (string, error) {
return selinux.Getfilecon(path)
func FileLabel(path string) (string, error) {
return selinux.FileLabel(path)
}
// SetFileLabel modifies the "path" label to the specified file label
func SetFileLabel(path string, fileLabel string) error {
if selinux.SelinuxEnabled() && fileLabel != "" {
return selinux.Setfilecon(path, fileLabel)
if selinux.GetEnabled() && fileLabel != "" {
return selinux.SetFileLabel(path, fileLabel)
}
return nil
}
// SetFileCreateLabel tells the kernel the label for all files to be created
func SetFileCreateLabel(fileLabel string) error {
if selinux.SelinuxEnabled() {
return selinux.Setfscreatecon(fileLabel)
if selinux.GetEnabled() {
return selinux.SetFSCreateLabel(fileLabel)
}
return nil
}
@ -123,7 +123,7 @@ func SetFileCreateLabel(fileLabel string) error {
// It changes the MCS label to s0 if shared is true.
// This will allow all containers to share the content.
func Relabel(path string, fileLabel string, shared bool) error {
if !selinux.SelinuxEnabled() {
if !selinux.GetEnabled() {
return nil
}
@ -147,14 +147,14 @@ func Relabel(path string, fileLabel string, shared bool) error {
return nil
}
// GetPidLabel will return the label of the process running with the specified pid
func GetPidLabel(pid int) (string, error) {
return selinux.Getpidcon(pid)
// PidLabel will return the label of the process running with the specified pid
func PidLabel(pid int) (string, error) {
return selinux.PidLabel(pid)
}
// Init initialises the labeling system
func Init() {
selinux.SelinuxEnabled()
selinux.GetEnabled()
}
// ReserveLabel will record the fact that the MCS label has already been used.
@ -165,15 +165,15 @@ func ReserveLabel(label string) error {
return nil
}
// UnreserveLabel will remove the reservation of the MCS label.
// ReleaseLabel will remove the reservation of the MCS label.
// This will allow InitLabels to use the MCS label in newly created
// containers
func UnreserveLabel(label string) error {
selinux.FreeLxcContexts(label)
func ReleaseLabel(label string) error {
selinux.ReleaseLabel(label)
return nil
}
// DupSecOpt takes an process label and returns security options that
// DupSecOpt takes a process label and returns security options that
// can be used to set duplicate labels on future container processes
func DupSecOpt(src string) []string {
return selinux.DupSecOpt(src)
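The hunks above mechanically follow the renamed go-selinux API (`SelinuxEnabled` → `GetEnabled`, `GetLxcContexts` → `ContainerLabels`, `Setexeccon` → `SetExecLabel`, `UnreserveLabel` → `ReleaseLabel`). A hedged sketch of how a caller typically drives these wrappers; the import path is an assumption for illustration, since the file path is not shown in this view:

```go
// Minimal sketch of using the label package wrappers shown above.
// The import path is hypothetical; adjust to wherever the label package is vendored.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/label" // assumed path, for illustration only
)

func main() {
	// InitLabels allocates a unique process/mount label pair,
	// or returns empty strings when SELinux is disabled.
	processLabel, mountLabel, err := label.InitLabels(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("process label:", processLabel)
	fmt.Println("mount label:", mountLabel)

	// ReleaseLabel (formerly UnreserveLabel) frees the MCS level so a
	// future container can reuse it.
	if err := label.ReleaseLabel(processLabel); err != nil {
		panic(err)
	}
}
```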

View File

@ -15,13 +15,14 @@ import (
"strings"
"sync"
"syscall"
"github.com/opencontainers/runc/libcontainer/system"
)
const (
Enforcing = 1
Permissive = 0
// Enforcing constant to indicate SELinux is in enforcing mode
Enforcing = 1
// Permissive constant to indicate SELinux is in permissive mode
Permissive = 0
// Disabled constant to indicate SELinux is disabled
Disabled = -1
selinuxDir = "/etc/selinux/"
selinuxConfig = selinuxDir + "config"
@ -32,33 +33,74 @@ const (
stRdOnly = 0x01
)
type selinuxState struct {
enabledSet bool
enabled bool
selinuxfsSet bool
selinuxfs string
mcsList map[string]bool
sync.Mutex
}
var (
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
mcsList = make(map[string]bool)
mcsLock sync.Mutex
selinuxfs = "unknown"
selinuxEnabled = false // Stores whether selinux is currently enabled
selinuxEnabledChecked = false // Stores whether selinux enablement has been checked or established yet
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
state = selinuxState{
mcsList: make(map[string]bool),
}
)
type SELinuxContext map[string]string
// Context is a representation of the SELinux label broken into 4 parts
type Context map[string]string
func (s *selinuxState) setEnable(enabled bool) bool {
s.Lock()
defer s.Unlock()
s.enabledSet = true
s.enabled = enabled
return s.enabled
}
func (s *selinuxState) getEnabled() bool {
s.Lock()
enabled := s.enabled
enabledSet := s.enabledSet
s.Unlock()
if enabledSet {
return enabled
}
enabled = false
if fs := getSelinuxMountPoint(); fs != "" {
if con, _ := CurrentLabel(); con != "kernel" {
enabled = true
}
}
return s.setEnable(enabled)
}
// SetDisabled disables selinux support for the package
func SetDisabled() {
selinuxEnabled, selinuxEnabledChecked = false, true
state.setEnable(false)
}
// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
// filesystem or an empty string if no mountpoint is found. Selinuxfs is
// a proc-like pseudo-filesystem that exposes the selinux policy API to
// processes. The existence of an selinuxfs mount is used to determine
// whether selinux is currently enabled or not.
func getSelinuxMountPoint() string {
if selinuxfs != "unknown" {
func (s *selinuxState) setSELinuxfs(selinuxfs string) string {
s.Lock()
defer s.Unlock()
s.selinuxfsSet = true
s.selinuxfs = selinuxfs
return s.selinuxfs
}
func (s *selinuxState) getSELinuxfs() string {
s.Lock()
selinuxfs := s.selinuxfs
selinuxfsSet := s.selinuxfsSet
s.Unlock()
if selinuxfsSet {
return selinuxfs
}
selinuxfs = ""
selinuxfs = ""
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return selinuxfs
@ -91,21 +133,21 @@ func getSelinuxMountPoint() string {
selinuxfs = ""
}
}
return selinuxfs
return s.setSELinuxfs(selinuxfs)
}
// SelinuxEnabled returns whether selinux is currently enabled.
func SelinuxEnabled() bool {
if selinuxEnabledChecked {
return selinuxEnabled
}
selinuxEnabledChecked = true
if fs := getSelinuxMountPoint(); fs != "" {
if con, _ := Getcon(); con != "kernel" {
selinuxEnabled = true
}
}
return selinuxEnabled
// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
// filesystem or an empty string if no mountpoint is found. Selinuxfs is
// a proc-like pseudo-filesystem that exposes the selinux policy API to
// processes. The existence of an selinuxfs mount is used to determine
// whether selinux is currently enabled or not.
func getSelinuxMountPoint() string {
return state.getSELinuxfs()
}
// GetEnabled returns whether selinux is currently enabled.
func GetEnabled() bool {
return state.getEnabled()
}
func readConfig(target string) (value string) {
@ -166,43 +208,55 @@ func readCon(name string) (string, error) {
return val, err
}
// Setfilecon sets the SELinux label for this path or returns an error.
func Setfilecon(path string, scon string) error {
return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0)
// SetFileLabel sets the SELinux label for this path or returns an error.
func SetFileLabel(path string, label string) error {
return lsetxattr(path, xattrNameSelinux, []byte(label), 0)
}
// Getfilecon returns the SELinux label for this path or returns an error.
func Getfilecon(path string) (string, error) {
con, err := system.Lgetxattr(path, xattrNameSelinux)
// FileLabel returns the SELinux label for this path or returns an error.
func FileLabel(path string) (string, error) {
label, err := lgetxattr(path, xattrNameSelinux)
if err != nil {
return "", err
}
// Trim the NUL byte at the end of the byte buffer, if present.
if len(con) > 0 && con[len(con)-1] == '\x00' {
con = con[:len(con)-1]
if len(label) > 0 && label[len(label)-1] == '\x00' {
label = label[:len(label)-1]
}
return string(con), nil
return string(label), nil
}
func Setfscreatecon(scon string) error {
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon)
/*
SetFSCreateLabel tells the kernel the label to use for all file system objects
created by this task. Setting label to "" reverts to the default.
*/
func SetFSCreateLabel(label string) error {
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), label)
}
func Getfscreatecon() (string, error) {
/*
FSCreateLabel returns the default label which the kernel is using
for file system objects created by this task. "" indicates the default.
*/
func FSCreateLabel() (string, error) {
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()))
}
// Getcon returns the SELinux label of the current process thread, or an error.
func Getcon() (string, error) {
// CurrentLabel returns the SELinux label of the current process thread, or an error.
func CurrentLabel() (string, error) {
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()))
}
// Getpidcon returns the SELinux label of the given pid, or an error.
func Getpidcon(pid int) (string, error) {
// PidLabel returns the SELinux label of the given pid, or an error.
func PidLabel(pid int) (string, error) {
return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
}
func Getexeccon() (string, error) {
/*
ExecLabel returns the SELinux label that the kernel will use for any programs
that are executed by the current process thread, or an error.
*/
func ExecLabel() (string, error) {
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
}
@ -221,19 +275,25 @@ func writeCon(name string, val string) error {
return err
}
func Setexeccon(scon string) error {
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon)
/*
SetExecLabel sets the SELinux label that the kernel will use for any programs
executed by the current process thread.
*/
func SetExecLabel(label string) error {
return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), label)
}
func (c SELinuxContext) Get() string {
// Get returns the Context as a string
func (c Context) Get() string {
return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
}
func NewContext(scon string) SELinuxContext {
c := make(SELinuxContext)
// NewContext creates a new Context struct from the specified label
func NewContext(label string) Context {
c := make(Context)
if len(scon) != 0 {
con := strings.SplitN(scon, ":", 4)
if len(label) != 0 {
con := strings.SplitN(label, ":", 4)
c["user"] = con[0]
c["role"] = con[1]
c["type"] = con[2]
@ -242,9 +302,10 @@ func NewContext(scon string) SELinuxContext {
return c
}
func ReserveLabel(scon string) {
if len(scon) != 0 {
con := strings.SplitN(scon, ":", 4)
// ReserveLabel reserves the MLS/MCS level component of the specified label
func ReserveLabel(label string) {
if len(label) != 0 {
con := strings.SplitN(label, ":", 4)
mcsAdd(con[3])
}
}
@ -253,7 +314,8 @@ func selinuxEnforcePath() string {
return fmt.Sprintf("%s/enforce", selinuxPath)
}
func SelinuxGetEnforce() int {
// EnforceMode returns the current SELinux mode: Enforcing, Permissive or Disabled
func EnforceMode() int {
var enforce int
enforceS, err := readCon(selinuxEnforcePath())
@ -268,11 +330,20 @@ func SelinuxGetEnforce() int {
return enforce
}
func SelinuxSetEnforce(mode int) error {
/*
SetEnforceMode sets the current SELinux mode to Enforcing or Permissive.
Disabled is not valid, since that needs to be set at boot time.
*/
func SetEnforceMode(mode int) error {
return writeCon(selinuxEnforcePath(), fmt.Sprintf("%d", mode))
}
func SelinuxGetEnforceMode() int {
/*
DefaultEnforceMode returns the system's default SELinux mode: Enforcing,
Permissive or Disabled. Note this is just the default at boot time.
EnforceMode tells you the system's current mode.
*/
func DefaultEnforceMode() int {
switch readConfig(selinuxTag) {
case "enforcing":
return Enforcing
@ -283,22 +354,22 @@ func SelinuxGetEnforceMode() int {
}
func mcsAdd(mcs string) error {
mcsLock.Lock()
defer mcsLock.Unlock()
if mcsList[mcs] {
state.Lock()
defer state.Unlock()
if state.mcsList[mcs] {
return fmt.Errorf("MCS Label already exists")
}
mcsList[mcs] = true
state.mcsList[mcs] = true
return nil
}
func mcsDelete(mcs string) {
mcsLock.Lock()
mcsList[mcs] = false
mcsLock.Unlock()
state.Lock()
defer state.Unlock()
state.mcsList[mcs] = false
}
func IntToMcs(id int, catRange uint32) string {
func intToMcs(id int, catRange uint32) string {
var (
SETSIZE = int(catRange)
TIER = SETSIZE
@ -334,9 +405,7 @@ func uniqMcs(catRange uint32) string {
continue
} else {
if c1 > c2 {
t := c1
c1 = c2
c2 = t
c1, c2 = c2, c1
}
}
mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
@ -348,26 +417,35 @@ func uniqMcs(catRange uint32) string {
return mcs
}
func FreeLxcContexts(scon string) {
if len(scon) != 0 {
con := strings.SplitN(scon, ":", 4)
/*
ReleaseLabel will unreserve the MLS/MCS Level field of the specified label,
allowing it to be used by another process.
*/
func ReleaseLabel(label string) {
if len(label) != 0 {
con := strings.SplitN(label, ":", 4)
mcsDelete(con[3])
}
}
var roFileLabel string
func GetROFileLabel() (fileLabel string) {
// ROFileLabel returns the specified SELinux readonly file label
func ROFileLabel() (fileLabel string) {
return roFileLabel
}
func GetLxcContexts() (processLabel string, fileLabel string) {
/*
ContainerLabels returns an allocated processLabel and fileLabel to be used for
container labeling by the calling process.
*/
func ContainerLabels() (processLabel string, fileLabel string) {
var (
val, key string
bufin *bufio.Reader
)
if !SelinuxEnabled() {
if !GetEnabled() {
return "", ""
}
lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot())
@ -419,7 +497,6 @@ func GetLxcContexts() (processLabel string, fileLabel string) {
roFileLabel = fileLabel
}
exit:
// mcs := IntToMcs(os.Getpid(), 1024)
mcs := uniqMcs(1024)
scon := NewContext(processLabel)
scon["level"] = mcs
@ -430,10 +507,15 @@ exit:
return processLabel, fileLabel
}
// SecurityCheckContext validates that the SELinux label is understood by the kernel
func SecurityCheckContext(val string) error {
return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
}
/*
CopyLevel returns a label with the MLS/MCS level from the src label replaced
on the dest label.
*/
func CopyLevel(src, dest string) (string, error) {
if src == "" {
return "", nil
@ -458,31 +540,31 @@ func badPrefix(fpath string) error {
for _, prefix := range badprefixes {
if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
return fmt.Errorf("Relabeling content in %s is not allowed.", prefix)
return fmt.Errorf("relabeling content in %s is not allowed", prefix)
}
}
return nil
}
// Chcon changes the fpath file object to the SELinux label scon.
// Chcon changes the fpath file object to the specified SELinux label.
// If the fpath is a directory and recurse is true Chcon will walk the
// directory tree setting the label
func Chcon(fpath string, scon string, recurse bool) error {
if scon == "" {
func Chcon(fpath string, label string, recurse bool) error {
if label == "" {
return nil
}
if err := badPrefix(fpath); err != nil {
return err
}
callback := func(p string, info os.FileInfo, err error) error {
return Setfilecon(p, scon)
return SetFileLabel(p, label)
}
if recurse {
return filepath.Walk(fpath, callback)
}
return Setfilecon(fpath, scon)
return SetFileLabel(fpath, label)
}
// DupSecOpt takes an SELinux process label and returns security options that
@ -498,14 +580,14 @@ func DupSecOpt(src string) []string {
con["level"] == "" {
return nil
}
return []string{"label=user:" + con["user"],
"label=role:" + con["role"],
"label=type:" + con["type"],
"label=level:" + con["level"]}
return []string{"user:" + con["user"],
"role:" + con["role"],
"type:" + con["type"],
"level:" + con["level"]}
}
// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes
func DisableSecOpt() []string {
return []string{"label=disable"}
return []string{"disable"}
}
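The most structural change in this file replaces the package-level globals (`mcsList`, `selinuxEnabled`, `selinuxfs`) with a single mutex-guarded `selinuxState`, so the check-once-and-cache logic is safe under concurrent callers. A simplified sketch of that pattern, under assumed names (not the vendored code):

```go
// Simplified sketch of the lazily-cached, mutex-guarded state pattern
// adopted by selinuxState above; names are illustrative only.
package main

import (
	"fmt"
	"sync"
)

// cachedFlag caches the result of an expensive probe behind a mutex,
// mirroring the role of selinuxState.enabled/enabledSet.
type cachedFlag struct {
	sync.Mutex
	set   bool
	value bool
}

// get returns the cached value, running probe at most once.
func (c *cachedFlag) get(probe func() bool) bool {
	c.Lock()
	defer c.Unlock()
	if !c.set {
		c.value = probe()
		c.set = true
	}
	return c.value
}

var enabled cachedFlag

func main() {
	fmt.Println(enabled.get(func() bool { return false })) // probes: false
	fmt.Println(enabled.get(func() bool { return true }))  // cached: still false
}
```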

View File

@ -0,0 +1,78 @@
// +build linux
package selinux
import (
"syscall"
"unsafe"
)
var _zero uintptr
// lgetxattr returns the value of the extended attribute attr on path as a
// []byte slice, or an error if the attribute is not set or cannot be read.
func lgetxattr(path string, attr string) ([]byte, error) {
var sz int
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {
return nil, err
}
attrBytes, err := syscall.BytePtrFromString(attr)
if err != nil {
return nil, err
}
// Start with a 128 length byte array
sz = 128
dest := make([]byte, sz)
destBytes := unsafe.Pointer(&dest[0])
_sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
switch {
case errno == syscall.ENODATA:
return nil, errno
case errno == syscall.ENOTSUP:
return nil, errno
case errno == syscall.ERANGE:
// The 128-byte buffer might not be large enough; call again with a
// nil buffer and size 0 to learn the real size of the xattr on disk,
// then retry with a buffer of that size.
_sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0)
sz = int(_sz)
if sz < 0 {
return nil, errno
}
dest = make([]byte, sz)
destBytes := unsafe.Pointer(&dest[0])
_sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
if errno != 0 {
return nil, errno
}
case errno != 0:
return nil, errno
}
sz = int(_sz)
return dest[:sz], nil
}
func lsetxattr(path string, attr string, data []byte, flags int) error {
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {
return err
}
attrBytes, err := syscall.BytePtrFromString(attr)
if err != nil {
return err
}
var dataBytes unsafe.Pointer
if len(data) > 0 {
dataBytes = unsafe.Pointer(&data[0])
} else {
dataBytes = unsafe.Pointer(&_zero)
}
_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
if errno != 0 {
return errno
}
return nil
}
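These wrappers let the package call lgetxattr/lsetxattr directly instead of importing `github.com/opencontainers/runc/libcontainer/system`, which is what the earlier hunks drop. A hedged round-trip example through the higher-level helpers built on top of them; the path is illustrative and the program assumes an SELinux-labeled filesystem:

```go
// Minimal sketch (assumption, not from the diff): reading and re-applying a
// file label via the xattr-backed helpers shown in this commit.
package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux"
)

func main() {
	const path = "/tmp/example.txt" // hypothetical file

	// FileLabel reads the file's SELinux xattr via lgetxattr and strips
	// the trailing NUL byte, if present.
	label, err := selinux.FileLabel(path)
	if err != nil {
		fmt.Println("could not read label:", err)
		return
	}
	fmt.Println("file label:", label)

	// SetFileLabel writes the label back through lsetxattr.
	if err := selinux.SetFileLabel(path, label); err != nil {
		fmt.Println("could not set label:", err)
	}
}
```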