Add stats options to not prime the stats
Metrics collectors generally don't need the daemon to prime the stats with an
initial sample to compare against, since they already have previous samples of
their own.
Before this change, the API performs two collection cycles (which take roughly
2s) in order to provide a comparison for CPU usage over a 1s window. This was
primarily added so that `docker stats --no-stream` had something to compare
against.

Really the CLI should have just made a 2nd call and done the comparison
itself rather than forcing it on all API consumers.
That ship has long sailed, though.

With this change, clients can set an option to just pull a single stat,
which is *at least* a full second faster:

Old:
```
time curl --unix-socket /go/src/github.com/docker/docker/bundles/test-integration-shell/docker.sock http://./containers/test/stats?stream=false\&one-shot=false > /dev/null 2>&1

real    0m1.864s
user    0m0.005s
sys     0m0.007s

time curl --unix-socket /go/src/github.com/docker/docker/bundles/test-integration-shell/docker.sock http://./containers/test/stats?stream=false\&one-shot=false > /dev/null 2>&1

real    0m1.173s
user    0m0.010s
sys     0m0.006s
```

New:
```
time curl --unix-socket /go/src/github.com/docker/docker/bundles/test-integration-shell/docker.sock http://./containers/test/stats?stream=false\&one-shot=true > /dev/null 2>&1

real    0m0.680s
user    0m0.008s
sys     0m0.004s

time curl --unix-socket /go/src/github.com/docker/docker/bundles/test-integration-shell/docker.sock http://./containers/test/stats?stream=false\&one-shot=true > /dev/null 2>&1

real    0m0.156s
user    0m0.007s
sys     0m0.007s
```

This fixes issues with downstreams' ability to use the stats API to collect
metrics.

Signed-off-by: Brian Goff <[email protected]>
cpuguy83 committed Feb 28, 2020
1 parent 40b2b4b commit ce1ceeb
Showing 8 changed files with 51 additions and 2 deletions.
5 changes: 5 additions & 0 deletions api/server/router/container/container_routes.go
@@ -105,9 +105,14 @@ func (s *containerRouter) getContainersStats(ctx context.Context, w http.Respons
	if !stream {
		w.Header().Set("Content-Type", "application/json")
	}
	var oneShot bool
	if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.41") {
		oneShot = httputils.BoolValueOrDefault(r, "one-shot", false)
	}

	config := &backend.ContainerStatsConfig{
		Stream:    stream,
		OneShot:   oneShot,
		OutStream: w,
		Version:   httputils.VersionFromContext(ctx),
	}
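For reference, a minimal sketch of hitting this endpoint directly over the daemon's unix socket from Go (the socket path and container name below are placeholders): pinning the request to `/v1.41/` matters because, as the handler change above shows, `one-shot` is only honoured on API version 1.41 and newer.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	const socketPath = "/var/run/docker.sock" // placeholder socket path
	const containerID = "test"                // placeholder container name

	// Route every request over the unix socket; the URL's host portion is ignored.
	httpc := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
			},
		},
	}

	// one-shot requires stream=false and API version >= 1.41.
	resp, err := httpc.Get("http://localhost/v1.41/containers/" + containerID + "/stats?stream=false&one-shot=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```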
5 changes: 5 additions & 0 deletions api/swagger.yaml
@@ -5685,6 +5685,11 @@ paths:
description: "Stream the output. If false, the stats will be output once and then it will disconnect."
type: "boolean"
default: true
- name: "one-shot"
in: "query"
description: "Only get a single stat instead of waiting for 2 cycles. Must be used with stream=false"
type: "boolean"
default: false
tags: ["Container"]
/containers/{id}/resize:
post:
1 change: 1 addition & 0 deletions api/types/backend/backend.go
@@ -73,6 +73,7 @@ type LogSelector struct {
// behavior of a backend.ContainerStats() call.
type ContainerStatsConfig struct {
	Stream    bool
	OneShot   bool
	OutStream io.Writer
	Version   string
}
16 changes: 16 additions & 0 deletions client/container_stats.go
@@ -24,3 +24,19 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea
	osType := getDockerOS(resp.header.Get("Server"))
	return types.ContainerStats{Body: resp.body, OSType: osType}, err
}

// ContainerStatsOneShot gets a single stat entry from a container.
// It differs from `ContainerStats` in that the API should not wait to prime the stats
func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) {
	query := url.Values{}
	query.Set("stream", "0")
	query.Set("one-shot", "1")

	resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
	if err != nil {
		return types.ContainerStats{}, err
	}

	osType := getDockerOS(resp.header.Get("Server"))
	return types.ContainerStats{Body: resp.body, OSType: osType}, err
}
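A short usage sketch for the new helper, assuming a running container named `test` (the container name, error handling, and the decode into `types.StatsJSON` are illustrative): version negotiation keeps the request harmless against older daemons, which ignore the unknown `one-shot` parameter.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()

	// Negotiate the API version with the daemon before making requests.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Single stats sample, no priming cycle on the daemon side.
	resp, err := cli.ContainerStatsOneShot(ctx, "test") // "test" is a placeholder container
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var stats types.StatsJSON
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		panic(err)
	}
	fmt.Println("memory usage (bytes):", stats.MemoryStats.Usage)
}
```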
1 change: 1 addition & 0 deletions client/interface.go
@@ -67,6 +67,7 @@ type ContainerAPIClient interface {
	ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
	ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
	ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error)
	ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error)
	ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
	ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
	ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error)
7 changes: 6 additions & 1 deletion daemon/stats.go
@@ -12,6 +12,7 @@ import (
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/api/types/versions/v1p20"
"github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
)

@@ -30,6 +31,10 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
		return err
	}

	if config.Stream && config.OneShot {
		return errdefs.InvalidParameter(errors.New("cannot have stream=true and one-shot=true"))
	}

	// If the container is either not running or restarting and requires no stream, return an empty stats.
	if (!container.IsRunning() || container.IsRestarting()) && !config.Stream {
		return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{
@@ -63,7 +68,7 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
	updates := daemon.subscribeToContainerStats(container)
	defer daemon.unsubscribeToContainerStats(container, updates)

	noStreamFirstFrame := true
	noStreamFirstFrame := !config.OneShot
	for {
		select {
		case v, ok := <-updates:
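To make the effect of `noStreamFirstFrame` concrete, here is a small hypothetical toy (not the daemon's actual collector loop): without `one-shot` the first update is discarded so the sample eventually returned has a prior CPU reading to compare against, which is where the extra ~1s comes from; with `one-shot` the very first update goes straight to the client, and `PreCPUStats` stays empty.

```go
package main

import "fmt"

// firstEmitted mimics the loop above: skipFirst mirrors
// noStreamFirstFrame := !config.OneShot.
func firstEmitted(updates []string, oneShot bool) string {
	skipFirst := !oneShot
	for _, u := range updates {
		if skipFirst {
			skipFirst = false
			continue // drop the priming sample
		}
		return u // first sample actually written to the client
	}
	return ""
}

func main() {
	samples := []string{"sample-1", "sample-2"}
	fmt.Println(firstEmitted(samples, false)) // "sample-2": waited one extra collection cycle
	fmt.Println(firstEmitted(samples, true))  // "sample-1": returned immediately
}
```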
2 changes: 2 additions & 0 deletions docs/api/version-history.md
@@ -57,6 +57,8 @@ keywords: "API, Docker, rcli, REST, documentation"
  service.
* `GET /tasks/{id}` now includes `JobIteration` on the task if spawned from a
  job-mode service.
* `GET /containers/{id}/stats` now accepts a query param (`one-shot`) which, when used with `stream=false`, fetches a
  single set of stats instead of waiting for two collection cycles to have 2 CPU stats over a 1 second period.

## v1.40 API changes

16 changes: 15 additions & 1 deletion integration/container/stats_test.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"io"
"reflect"
"testing"
"time"

@@ -33,10 +34,23 @@ func TestStats(t *testing.T) {
	assert.NilError(t, err)
	defer resp.Body.Close()

	var v *types.Stats
	var v types.Stats
	err = json.NewDecoder(resp.Body).Decode(&v)
	assert.NilError(t, err)
	assert.Check(t, is.Equal(int64(v.MemoryStats.Limit), info.MemTotal))
	assert.Check(t, !reflect.DeepEqual(v.PreCPUStats, types.CPUStats{}))
	err = json.NewDecoder(resp.Body).Decode(&v)
	assert.Assert(t, is.ErrorContains(err, ""), io.EOF)

	resp, err = client.ContainerStatsOneShot(ctx, cID)
	assert.NilError(t, err)
	defer resp.Body.Close()

	v = types.Stats{}
	err = json.NewDecoder(resp.Body).Decode(&v)
	assert.NilError(t, err)
	assert.Check(t, is.Equal(int64(v.MemoryStats.Limit), info.MemTotal))
	assert.Check(t, is.DeepEqual(v.PreCPUStats, types.CPUStats{}))
	err = json.NewDecoder(resp.Body).Decode(&v)
	assert.Assert(t, is.ErrorContains(err, ""), io.EOF)
}
