Fixes a race condition in client events monitoring

In cases where there is high latency (i.e., a non-local network),
`waitExitOrRemoved` was not receiving events for short-lived containers.
This caused the client to hang while waiting for a notification that the
container has stopped.

This happens because `client.Events()` returns immediately and spins a
goroutine up to process events. The problem here is it returns before
the request to the events endpoint is even made.
Even without high-latency issues, there is no guarantee that the
goroutine is even scheduled by the time the function returns.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
This commit is contained in:
Brian Goff 2016-12-15 13:07:27 -05:00
parent 7ca372f95e
commit bcb7147ae5
1 changed file with 5 additions and 0 deletions

View File

@@ -22,17 +22,20 @@ func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-c
 	messages := make(chan events.Message)
 	errs := make(chan error, 1)
+	started := make(chan struct{})
 	go func() {
 		defer close(errs)
 		query, err := buildEventsQueryParams(cli.version, options)
 		if err != nil {
+			close(started)
			errs <- err
 			return
 		}
 		resp, err := cli.get(ctx, "/events", query, nil)
 		if err != nil {
+			close(started)
 			errs <- err
 			return
 		}
@@ -40,6 +43,7 @@ func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-c
 	decoder := json.NewDecoder(resp.body)
+	close(started)
 	for {
 		select {
 		case <-ctx.Done():
@@ -61,6 +65,7 @@ func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-c
 			}
 		}
 	}()
+	<-started
 	return messages, errs
 }