Skip to content

Commit

Permalink
add support for docker volume ls & inspect
Browse files Browse the repository at this point in the history
Signed-off-by: Victor Vieux <[email protected]>
  • Loading branch information
vieux committed Sep 5, 2015
1 parent 6787aa8 commit 77c9690
Show file tree
Hide file tree
Showing 16 changed files with 204 additions and 1 deletion.
2 changes: 1 addition & 1 deletion Godeps/Godeps.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

10 changes: 10 additions & 0 deletions Godeps/_workspace/src/github.com/samalba/dockerclient/types.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

20 changes: 20 additions & 0 deletions api/handlers.go
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,16 @@ func getImagesJSON(c *context, w http.ResponseWriter, r *http.Request) {
json.NewEncoder(w).Encode(images)
}

// GET /volumes
// getVolumes writes the cluster's volume list as a JSON object of the
// form {"Volumes": [...]}.
func getVolumes(c *context, w http.ResponseWriter, r *http.Request) {
	resp := struct {
		Volumes []*cluster.Volume
	}{Volumes: c.cluster.Volumes()}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}

// GET /containers/ps
// GET /containers/json
func getContainersJSON(c *context, w http.ResponseWriter, r *http.Request) {
Expand Down Expand Up @@ -532,6 +542,16 @@ func ping(c *context, w http.ResponseWriter, r *http.Request) {
w.Write([]byte{'O', 'K'})
}

// Proxy a request to the right node
// proxyVolume resolves the volume named in the URL and forwards the
// request to the engine that owns it; unknown names yield 404.
func proxyVolume(c *context, w http.ResponseWriter, r *http.Request) {
	name := mux.Vars(r)["volumename"]

	volume := c.cluster.Volume(name)
	if volume == nil {
		httpError(w, fmt.Sprintf("No such volume: %s", name), http.StatusNotFound)
		return
	}
	proxy(c.tlsConfig, volume.Engine.Addr, w, r)
}

// Proxy a request to the right node
func proxyContainer(c *context, w http.ResponseWriter, r *http.Request) {
name, container, err := getContainerFromVars(c, mux.Vars(r))
Expand Down
2 changes: 2 additions & 0 deletions api/primary.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,8 @@ var routes = map[string]map[string]handler{
"/containers/{name:.*}/stats": proxyContainer,
"/containers/{name:.*}/attach/ws": proxyHijack,
"/exec/{execid:.*}/json": proxyContainer,
"/volumes": getVolumes,
"/volumes/{volumename:.*}": proxyVolume,
},
"POST": {
"/auth": proxyRandom,
Expand Down
6 changes: 6 additions & 0 deletions cluster/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,12 @@ type Cluster interface {
// cluster.Containers().Get(IDOrName)
Container(IDOrName string) *Container

// Return all volumes
Volumes() []*Volume

// Return one volume from the cluster
Volume(name string) *Volume

// Pull images
// `callback` can be called multiple times
// `where` is where it is being pulled
Expand Down
35 changes: 35 additions & 0 deletions cluster/engine.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ type Engine struct {
stopCh chan struct{}
containers map[string]*Container
images []*Image
volumes []*Volume
client dockerclient.Client
eventHandler EventHandler
healthy bool
Expand Down Expand Up @@ -103,6 +104,9 @@ func (e *Engine) ConnectWithClient(client dockerclient.Client) error {
return err
}

// Do not check error as older daemons don't support this call
e.RefreshVolumes()

// Start the update loop.
go e.refreshLoop()

Expand Down Expand Up @@ -198,6 +202,21 @@ func (e *Engine) RefreshImages() error {
return nil
}

// RefreshVolumes refreshes the list of volumes on the engine.
// The previous snapshot is replaced wholesale, so volumes that
// disappeared from the daemon are dropped from the cache.
func (e *Engine) RefreshVolumes() error {
	volumes, err := e.client.ListVolumes()
	if err != nil {
		return err
	}
	// Build the new snapshot outside the lock (pre-sized to avoid
	// repeated growth) so the critical section is a single swap.
	refreshed := make([]*Volume, 0, len(volumes))
	for _, volume := range volumes {
		refreshed = append(refreshed, &Volume{Volume: *volume, Engine: e})
	}
	e.Lock()
	e.volumes = refreshed
	e.Unlock()
	return nil
}

// RefreshContainers will refresh the list and status of containers running on the engine. If `full` is
// true, each container will be inspected.
// FIXME: unexport this method after mesos scheduler stops using it directly
Expand Down Expand Up @@ -312,6 +331,8 @@ func (e *Engine) refreshLoop() {

err = e.RefreshContainers(false)
if err == nil {
// Do not check error as older daemons don't support this call
e.RefreshVolumes()
err = e.RefreshImages()
}

Expand Down Expand Up @@ -521,6 +542,18 @@ func (e *Engine) Images(all bool) []*Image {
return images
}

// Volumes returns all the volumes in the engine
// (a shallow copy, so callers can iterate without holding the lock).
func (e *Engine) Volumes() []*Volume {
	e.RLock()
	defer e.RUnlock()

	out := make([]*Volume, 0, len(e.volumes))
	out = append(out, e.volumes...)
	return out
}

// Image returns the image with IDOrName in the engine
func (e *Engine) Image(IDOrName string) *Image {
e.RLock()
Expand Down Expand Up @@ -549,9 +582,11 @@ func (e *Engine) handler(ev *dockerclient.Event, _ chan error, args ...interface
// If the container state changes, we have to do an inspect in
// order to update container.Info and get the new NetworkSettings.
e.refreshContainer(ev.Id, true)
e.RefreshVolumes()
default:
// Otherwise, do a "soft" refresh of the container.
e.refreshContainer(ev.Id, false)
e.RefreshVolumes()
}

// If there is no event handler registered, abort right now.
Expand Down
8 changes: 8 additions & 0 deletions cluster/engine_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ func TestEngineCpusMemory(t *testing.T) {
client.On("Version").Return(mockVersion, nil)
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil)
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()

assert.NoError(t, engine.ConnectWithClient(client))
Expand All @@ -94,6 +95,7 @@ func TestEngineSpecs(t *testing.T) {
client.On("Version").Return(mockVersion, nil)
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil)
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()

assert.NoError(t, engine.ConnectWithClient(client))
Expand Down Expand Up @@ -123,6 +125,7 @@ func TestEngineState(t *testing.T) {
// The client will return one container at first, then a second one will appear.
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{{Id: "one"}}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", "one").Return(&dockerclient.ContainerInfo{Config: &dockerclient.ContainerConfig{CpuShares: 100}}, nil).Once()
client.On("ListContainers", true, false, fmt.Sprintf("{%q:[%q]}", "id", "two")).Return([]dockerclient.Container{{Id: "two"}}, nil).Once()
client.On("InspectContainer", "two").Return(&dockerclient.ContainerInfo{Config: &dockerclient.ContainerConfig{CpuShares: 100}}, nil).Once()
Expand Down Expand Up @@ -168,6 +171,7 @@ func TestCreateContainer(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
assert.NoError(t, engine.ConnectWithClient(client))
assert.True(t, engine.isConnected())

Expand All @@ -181,6 +185,7 @@ func TestCreateContainer(t *testing.T) {
client.On("CreateContainer", &mockConfig, name).Return(id, nil).Once()
client.On("ListContainers", true, false, fmt.Sprintf(`{"id":[%q]}`, id)).Return([]dockerclient.Container{{Id: id}}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", id).Return(&dockerclient.ContainerInfo{Config: &config.ContainerConfig}, nil).Once()
container, err := engine.Create(config, name, false)
assert.Nil(t, err)
Expand All @@ -204,6 +209,7 @@ func TestCreateContainer(t *testing.T) {
client.On("CreateContainer", &mockConfig, name).Return(id, nil).Once()
client.On("ListContainers", true, false, fmt.Sprintf(`{"id":[%q]}`, id)).Return([]dockerclient.Container{{Id: id}}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", id).Return(&dockerclient.ContainerInfo{Config: &config.ContainerConfig}, nil).Once()
container, err = engine.Create(config, name, true)
assert.Nil(t, err)
Expand Down Expand Up @@ -251,6 +257,7 @@ func TestUsedCpus(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{{Id: "test"}}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", "test").Return(&dockerclient.ContainerInfo{Config: &dockerclient.ContainerConfig{CpuShares: cpuShares}}, nil).Once()
engine.ConnectWithClient(client)

Expand Down Expand Up @@ -279,6 +286,7 @@ func TestContainerRemovedDuringRefresh(t *testing.T) {
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{container1, container2}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", "c1").Return(info1, errors.New("Not found"))
client.On("InspectContainer", "c2").Return(info2, nil)

Expand Down
10 changes: 10 additions & 0 deletions cluster/mesos/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -292,6 +292,16 @@ func (c *Cluster) RenameContainer(container *cluster.Container, newName string)
return nil
}

// Volumes returns all the volumes in the cluster.
// Volume listing is not implemented for the Mesos backend yet, so this
// always returns nil.
func (c *Cluster) Volumes() []*cluster.Volume {
	return nil
}

// Volume returns the volume with the given name in the cluster.
// Volume lookup is not implemented for the Mesos backend yet, so this
// always returns nil.
func (c *Cluster) Volume(name string) *cluster.Volume {
	return nil
}

// listNodes returns all the nodes in the cluster.
func (c *Cluster) listNodes() []*node.Node {
c.RLock()
Expand Down
33 changes: 33 additions & 0 deletions cluster/swarm/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -470,6 +470,39 @@ func (c *Cluster) Container(IDOrName string) *cluster.Container {

}

// Volumes returns all the volumes in the cluster.
// The aggregate of every engine's volume list is returned.
func (c *Cluster) Volumes() []*cluster.Volume {
	c.RLock()
	defer c.RUnlock()

	// Start from a non-nil empty slice so an empty cluster serializes
	// to JSON as [] rather than null.
	volumes := make([]*cluster.Volume, 0)
	for _, engine := range c.engines {
		volumes = append(volumes, engine.Volumes()...)
	}

	return volumes
}

// Volume returns the volume with the given name in the cluster,
// or nil when no engine reports a volume by that name.
func (c *Cluster) Volume(name string) *cluster.Volume {
	// An empty name can never match; bail out before taking the lock.
	if name == "" {
		return nil
	}

	c.RLock()
	defer c.RUnlock()

	for _, engine := range c.engines {
		for _, volume := range engine.Volumes() {
			if volume.Name == name {
				return volume
			}
		}
	}
	return nil
}

// listNodes returns all the engines in the cluster.
func (c *Cluster) listNodes() []*node.Node {
c.RLock()
Expand Down
3 changes: 3 additions & 0 deletions cluster/swarm/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -130,6 +130,7 @@ func TestImportImage(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)

// connect client
engine.ConnectWithClient(client)
Expand Down Expand Up @@ -178,6 +179,7 @@ func TestLoadImage(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)

// connect client
engine.ConnectWithClient(client)
Expand Down Expand Up @@ -229,6 +231,7 @@ func TestTagImage(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
client.On("ListImages", mock.Anything).Return(images, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)

// connect client
engine.ConnectWithClient(client)
Expand Down
10 changes: 10 additions & 0 deletions cluster/volume.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
package cluster

import "github.com/samalba/dockerclient"

// Volume represents a Docker volume as seen by the cluster. It embeds
// the dockerclient volume description and records the Engine the
// volume lives on, so requests can be proxied to the right node.
type Volume struct {
	dockerclient.Volume

	Engine *Engine
}
43 changes: 43 additions & 0 deletions test/integration/api/volume.bats
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
#!/usr/bin/env bats

load ../helpers

# Tear down after every test: stop the swarm manager, then the docker
# daemons started by the test.
function teardown() {
swarm_manage_cleanup
stop_docker
}

# Each `docker volume` listing prints a header line, so the expected
# line count is 1 (header only) plus one line per volume.
@test "docker volume" {
start_docker_with_busybox 2
swarm_manage

# make sure no volumes exist yet: header line only
run docker_swarm volume
[ "${#lines[@]}" -eq 1 ]

# run a container with an anonymous volume; one volume should appear
docker_swarm run -d -v=/tmp busybox true

run docker_swarm volume
[ "${#lines[@]}" -eq 2 ]

docker_swarm run -d -v=/tmp busybox true

run docker_swarm volume
[ "${#lines[@]}" -eq 3 ]
}

@test "docker volume inspect" {
start_docker_with_busybox 2
swarm_manage

# run a container with an anonymous volume so one volume exists
docker_swarm run -d -v=/tmp busybox true

# `volume ls -q` prints one bare volume name per line (no header)
run docker_swarm volume ls -q
[ "${#lines[@]}" -eq 1 ]

# NOTE(review): the 7-line count assumes the inspect JSON layout of the
# docker version under test — confirm if the format changes.
run docker_swarm volume inspect ${output}
[ "${#lines[@]}" -eq 7 ]
[[ "${output}" == *"\"Driver\": \"local\""* ]]
}

0 comments on commit 77c9690

Please sign in to comment.