diff --git a/builder/Makefile b/builder/Makefile
index 7108fc1fbe..e8340aeb2b 100644
--- a/builder/Makefile
+++ b/builder/Makefile
@@ -70,4 +70,4 @@ test-unit:
 
 test-functional:
 	@docker history deis/test-etcd >/dev/null 2>&1 || docker pull deis/test-etcd:latest
-	GOPATH=$(CURDIR)/../tests/_vendor:$(GOPATH) go test -v ./tests/...
+	GOPATH=`cd ../tests/ && godep path`:$(GOPATH) go test -v ./tests/...
diff --git a/cache/Makefile b/cache/Makefile
index 06b5bd0b0e..8a6e8099ef 100644
--- a/cache/Makefile
+++ b/cache/Makefile
@@ -56,4 +56,4 @@ test-unit:
 
 test-functional:
 	@docker history deis/test-etcd >/dev/null 2>&1 || docker pull deis/test-etcd:latest
-	GOPATH=$(CURDIR)/../tests/_vendor:$(GOPATH) go test -v ./tests/...
+	GOPATH=`cd ../tests/ && godep path`:$(GOPATH) go test -v ./tests/...
diff --git a/controller/Makefile b/controller/Makefile
index 706e119feb..0002694ebb 100644
--- a/controller/Makefile
+++ b/controller/Makefile
@@ -72,4 +72,4 @@ test-unit: setup-venv test-style
 test-functional:
 	@docker history deis/test-etcd >/dev/null 2>&1 || docker pull deis/test-etcd:latest
 	@docker history deis/test-postgresql >/dev/null 2>&1 || docker pull deis/test-postgresql:latest
-	GOPATH=$(CURDIR)/../tests/_vendor:$(GOPATH) go test -v ./tests/...
+	GOPATH=`cd ../tests/ && godep path`:$(GOPATH) go test -v ./tests/...
diff --git a/database/Makefile b/database/Makefile
index 87e7f0ab75..b4be0a20e0 100644
--- a/database/Makefile
+++ b/database/Makefile
@@ -50,4 +50,4 @@ test-unit:
 
 test-functional:
 	@docker history deis/test-etcd >/dev/null 2>&1 || docker pull deis/test-etcd:latest
-	GOPATH=$(CURDIR)/../tests/_vendor:$(GOPATH) go test -v ./tests/...
+	GOPATH=`cd ../tests/ && godep path`:$(GOPATH) go test -v ./tests/...
diff --git a/logger/Makefile b/logger/Makefile
index 17a6d3b3de..4d4fd592ba 100644
--- a/logger/Makefile
+++ b/logger/Makefile
@@ -67,7 +67,7 @@ test-unit: test-style
 
 test-functional:
 	@docker history deis/test-etcd >/dev/null 2>&1 || docker pull deis/test-etcd:latest
-	GOPATH=$(CURDIR)/../tests/_vendor:$(GOPATH) go test -v ./tests/...
+	GOPATH=`cd ../tests/ && godep path`:$(GOPATH) go test -v ./tests/...
 
 coverage:
 	go test -coverprofile coverage.out ./syslog
diff --git a/registry/Makefile b/registry/Makefile
index 1988f4291e..0a736d7a72 100644
--- a/registry/Makefile
+++ b/registry/Makefile
@@ -50,4 +50,4 @@ test-unit:
 
 test-functional:
 	@docker history deis/test-etcd >/dev/null 2>&1 || docker pull deis/test-etcd:latest
-	GOPATH=$(CURDIR)/../tests/_vendor:$(GOPATH) go test -v ./tests/...
+	GOPATH=`cd ../tests/ && godep path`:$(GOPATH) go test -v ./tests/...
diff --git a/router/Makefile b/router/Makefile
index 31d57afd36..fbb6a38b4f 100644
--- a/router/Makefile
+++ b/router/Makefile
@@ -56,4 +56,4 @@ test-unit:
 
 test-functional:
 	@docker history deis/test-etcd >/dev/null 2>&1 || docker pull deis/test-etcd:latest
-	GOPATH=$(CURDIR)/../tests/_vendor:$(GOPATH) go test -v ./tests/...
+	GOPATH=`cd ../tests/ && godep path`:$(GOPATH) go test -v ./tests/...
diff --git a/store/Makefile b/store/Makefile
index 7fca37bb06..234f7d8e56 100644
--- a/store/Makefile
+++ b/store/Makefile
@@ -108,4 +108,4 @@ test-unit:
 
 test-functional:
 	@docker history deis/test-etcd >/dev/null 2>&1 || docker pull deis/test-etcd:latest
-	GOPATH=$(CURDIR)/../tests/_vendor:$(GOPATH) go test -v ./tests/...
+	GOPATH=`cd ../tests/ && godep path`:$(GOPATH) go test -v ./tests/...
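Note on the recipe change above: `godep path` prints the `Godeps/_workspace` directory for the package in the current directory, so each `test-functional` target now resolves test imports against the dependencies pinned under `tests/Godeps/` instead of the old hand-maintained `tests/_vendor` tree. The following sketch is illustrative only and not part of the patch; it assumes `godep` is on PATH and that it runs from a component directory such as `builder/`, and spells out in Go what each one-line recipe does:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Equivalent of `cd ../tests/ && godep path`: godep prints the
	// Godeps/_workspace path for the package in that directory.
	pathCmd := exec.Command("godep", "path")
	pathCmd.Dir = "../tests"
	out, err := pathCmd.Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "godep path:", err)
		os.Exit(1)
	}
	workspace := strings.TrimSpace(string(out))

	// Prepend the workspace to GOPATH, exactly as the Makefile recipes do,
	// so `go test` resolves imports against the pinned dependencies.
	test := exec.Command("go", "test", "-v", "./tests/...")
	test.Env = append(os.Environ(),
		"GOPATH="+workspace+string(os.PathListSeparator)+os.Getenv("GOPATH"))
	test.Stdout, test.Stderr = os.Stdout, os.Stderr
	if err := test.Run(); err != nil {
		os.Exit(1)
	}
}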
diff --git a/tests/Godeps/Godeps.json b/tests/Godeps/Godeps.json
new file mode 100644
index 0000000000..289c917166
--- /dev/null
+++ b/tests/Godeps/Godeps.json
@@ -0,0 +1,214 @@
+{
+	"ImportPath": "github.com/deis/deis/tests",
+	"GoVersion": "go1.3.3",
+	"Packages": [
+		"./..."
+	],
+	"Deps": [
+		{
+			"ImportPath": "github.com/ThomasRooney/gexpect",
+			"Rev": "025428b8021ad3cef9602997658d9eca6c145d4e"
+		},
+		{
+			"ImportPath": "github.com/coreos/go-etcd/etcd",
+			"Comment": "v0.2.0-rc1-120-g23142f6",
+			"Rev": "23142f6773a676cc2cae8dd0cb90b2ea761c853f"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/api",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/daemon/graphdriver",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/dockerversion",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/engine",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/graph",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/image",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/nat",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/opts",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/archive",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/fileutils",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/httputils",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/ioutils",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/log",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/mflag",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/mount",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/parsers",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/pools",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/promise",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/signal",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/stdcopy",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/sysinfo",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/system",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/tarsum",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/term",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/timeutils",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/truncindex",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/units",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/version",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/registry",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/runconfig",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/utils",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar",
+			"Comment": "v1.3.0",
+			"Rev": "c78088fe3d1b90640c637d8c3457de3caa0c7a24"
+		},
+		{
+			"ImportPath": "github.com/docker/libcontainer/cgroups",
+			"Comment": "v1.2.0-160-g7294213",
+			"Rev": "72942137ef110d9468be3a4855b687d3734794df"
+		},
+		{
+			"ImportPath": "github.com/docker/libcontainer/devices",
+			"Comment": "v1.2.0-160-g7294213",
+			"Rev": "72942137ef110d9468be3a4855b687d3734794df"
+		},
+		{
+			"ImportPath": "github.com/docker/libtrust",
+			"Rev": "6b7834910dcbb3021adc193411d01f65595445fb"
+		},
+		{
+			"ImportPath": "github.com/gorilla/context",
+			"Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a"
+		},
+		{
+			"ImportPath": "github.com/gorilla/mux",
+			"Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf"
+		},
+		{
+			"ImportPath": "github.com/kballard/go-shellquote",
+			"Rev": "e5c918b80c17694cbc49aab32a759f9a40067f5d"
+		},
+		{
+			"ImportPath": "github.com/kr/pty",
+			"Comment": "release.r56-19-g67e2db2",
+			"Rev": "67e2db24c831afa6c64fc17b4a143390674365ef"
+		},
+		{
+			"ImportPath": "github.com/tchap/go-patricia/patricia",
+			"Comment": "v1.0.1",
+			"Rev": "f64d0a63cd3363481c898faa9339de04d12213f9"
+		}
+	]
+}
diff --git a/tests/Godeps/Readme b/tests/Godeps/Readme
new file mode 100644
index 0000000000..4cdaa53d56
--- /dev/null
+++ b/tests/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
diff --git a/tests/Godeps/_workspace/.gitignore b/tests/Godeps/_workspace/.gitignore new file mode 100644 index 0000000000..f037d684ef --- /dev/null +++ b/tests/Godeps/_workspace/.gitignore @@ -0,0 +1,2 @@ +/pkg +/bin diff --git a/tests/_vendor/src/github.com/ThomasRooney/gexpect/LICENCE b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/LICENCE similarity index 100% rename from tests/_vendor/src/github.com/ThomasRooney/gexpect/LICENCE rename to tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/LICENCE diff --git a/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/README.md b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/README.md new file mode 100644 index 0000000000..784608e3d7 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/README.md @@ -0,0 +1,53 @@ +## Gexpect + +Gexpect is a pure golang expect-like module. + +It makes it simple and safe to control other terminal applications. + +It provides pexpect-like syntax for golang + + child, err := gexpect.Spawn("python") + if err != nil { + panic(err) + } + child.Expect(">>>") + child.SendLine("print 'Hello World'") + child.Interact() + child.Close() + +It's fast, with its 'expect' function working off a variant of Knuth-Morris-Pratt on Standard Output/Error streams + +It also provides interface functions that make it much simpler to work with subprocesses + + child.Spawn("/bin/sh -c 'echo \"my complicated command\" | tee log | cat > log2'") + + child.ReadLine() // ReadLine() (string, error) + + child.ReadUntil(' ') // ReadUntil(delim byte) ([]byte, error) + + child.SendLine("/bin/sh -c 'echo Hello World | tee foo'") // SendLine(command string) (error) + + child.Wait() // Wait() (error) + + sender, reciever := child.AsyncInteractChannels() // AsyncInteractChannels() (chan string, chan string) + sender <- "echo Hello World\n" // Send to stdin + + line, open := <- reciever // Recieve a line from stdout/stderr + // When the subprocess stops (e.g. with child.Close()) , receiver is closed + if open { + fmt.Printf("Received %s", line)] + } + + +Free, MIT open source licenced, etc etc. + +Check gexpect_test.go and the examples folder for full examples + +### Golang Dependencies + + "github.com/kballard/go-shellquote" + "github.com/kr/pty" + +# Credits + + KMP Algorithm: "http://blog.databigbang.com/searching-for-substrings-in-streams-a-slight-modification-of-the-knuth-morris-pratt-algorithm-in-haxe/" \ No newline at end of file diff --git a/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/examples/python.go b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/examples/python.go new file mode 100644 index 0000000000..ea1208a1f3 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/examples/python.go @@ -0,0 +1,22 @@ +package main + +import "github.com/ThomasRooney/gexpect" +import "fmt" + +func main() { + fmt.Printf("Starting python.. \n") + child, err := gexpect.Spawn("python") + if err != nil { + panic(err) + } + fmt.Printf("Expecting >>>.. \n") + child.Expect(">>>") + fmt.Printf("print 'Hello World'..\n") + child.SendLine("print 'Hello World'") + child.Expect(">>>") + + fmt.Printf("Interacting.. 
\n") + child.Interact() + fmt.Printf("Done \n") + child.Close() +} diff --git a/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/examples/screen.go b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/examples/screen.go new file mode 100644 index 0000000000..c94aab2c1f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/examples/screen.go @@ -0,0 +1,53 @@ +package main + +import "github.com/ThomasRooney/gexpect" +import "fmt" +import "strings" + +func main() { + waitChan := make(chan string) + + fmt.Printf("Starting screen.. \n") + + child, err := gexpect.Spawn("screen") + if err != nil { + panic(err) + } + + sender, reciever := child.AsyncInteractChannels() + go func() { + waitString := "" + count := 0 + for { + select { + case waitString = <-waitChan: + count++ + case msg, open := <-reciever: + if !open { + return + } + fmt.Printf("Recieved: %s\n", msg) + + if strings.Contains(msg, waitString) { + if count >= 1 { + waitChan <- msg + count -= 1 + } + } + } + } + }() + wait := func(str string) { + waitChan <- str + <-waitChan + } + fmt.Printf("Waiting until started.. \n") + wait(" ") + fmt.Printf("Sending Enter.. \n") + sender <- "\n" + wait("$") + fmt.Printf("Sending echo.. \n") + sender <- "echo Hello World\n" + wait("Hello World") + fmt.Printf("Received echo. \n") +} diff --git a/tests/_vendor/src/github.com/ThomasRooney/gexpect/gexpect.go b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/gexpect.go similarity index 100% rename from tests/_vendor/src/github.com/ThomasRooney/gexpect/gexpect.go rename to tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/gexpect.go diff --git a/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/gexpect_test.go b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/gexpect_test.go new file mode 100644 index 0000000000..7c40cb8fd1 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/ThomasRooney/gexpect/gexpect_test.go @@ -0,0 +1,122 @@ +package gexpect + +import ( + "log" + "strings" + "testing" +) + +func TestHelloWorld(*testing.T) { + log.Printf("Testing Hello World... ") + child, err := Spawn("echo \"Hello World\"") + if err != nil { + panic(err) + } + err = child.Expect("Hello World") + if err != nil { + panic(err) + } + log.Printf("Success\n") +} + +func TestHelloWorldFailureCase(*testing.T) { + log.Printf("Testing Hello World Failure case... ") + child, err := Spawn("echo \"Hello World\"") + if err != nil { + panic(err) + } + err = child.Expect("YOU WILL NEVER FIND ME") + if err != nil { + log.Printf("Success\n") + return + } + panic("Expected an error for TestHelloWorldFailureCase") +} + +func TestBiChannel(*testing.T) { + log.Printf("Testing BiChannel screen... ") + child, err := Spawn("screen") + if err != nil { + panic(err) + } + sender, reciever := child.AsyncInteractChannels() + wait := func(str string) { + for { + msg, open := <-reciever + if !open { + return + } + if strings.Contains(msg, str) { + return + } + } + } + sender <- "\n" + sender <- "echo Hello World\n" + wait("Hello World") + sender <- "times\n" + wait("s") + sender <- "^D\n" + log.Printf("Success\n") + +} + +func TestExpectRegex(*testing.T) { + log.Printf("Testing ExpectRegex... ") + + child, err := Spawn("/bin/sh times") + if err != nil { + panic(err) + } + child.ExpectRegex("Name") + log.Printf("Success\n") + +} + +func TestCommandStart(*testing.T) { + log.Printf("Testing Command... 
") + + // Doing this allows you to modify the cmd struct prior to execution, for example to add environment variables + child, err := Command("echo 'Hello World'") + if err != nil { + panic(err) + } + child.Start() + child.Expect("Hello World") + log.Printf("Success\n") +} + +func TestExpectFtp(*testing.T) { + log.Printf("Testing Ftp... ") + + child, err := Spawn("ftp ftp.openbsd.org") + if err != nil { + panic(err) + } + child.Expect("Name") + child.SendLine("anonymous") + child.Expect("Password") + child.SendLine("pexpect@sourceforge.net") + child.Expect("ftp> ") + child.SendLine("cd /pub/OpenBSD/3.7/packages/i386") + child.Expect("ftp> ") + child.SendLine("bin") + child.Expect("ftp> ") + child.SendLine("prompt") + child.Expect("ftp> ") + child.SendLine("pwd") + child.Expect("ftp> ") + log.Printf("Success\n") +} + +func TestInteractPing(*testing.T) { + log.Printf("Testing Ping interact... \n") + + child, err := Spawn("ping -c8 8.8.8.8") + if err != nil { + panic(err) + } + child.Interact() + log.Printf("Success\n") + +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/add_child.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/add_child.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go new file mode 100644 index 0000000000..26223ff1c8 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go @@ -0,0 +1,73 @@ +package etcd + +import "testing" + +func TestAddChild(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("fooDir", true) + c.Delete("nonexistentDir", true) + }() + + c.CreateDir("fooDir", 5) + + _, err := c.AddChild("fooDir", "v0", 5) + if err != nil { + t.Fatal(err) + } + + _, err = c.AddChild("fooDir", "v1", 5) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Get("fooDir", true, false) + // The child with v0 should proceed the child with v1 because it's added + // earlier, so it should have a lower key. + if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) { + t.Fatalf("AddChild 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+ + " The response was: %#v", resp) + } + + // Creating a child under a nonexistent directory should succeed. + // The directory should be created. + resp, err = c.AddChild("nonexistentDir", "foo", 5) + if err != nil { + t.Fatal(err) + } +} + +func TestAddChildDir(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("fooDir", true) + c.Delete("nonexistentDir", true) + }() + + c.CreateDir("fooDir", 5) + + _, err := c.AddChildDir("fooDir", 5) + if err != nil { + t.Fatal(err) + } + + _, err = c.AddChildDir("fooDir", 5) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Get("fooDir", true, false) + // The child with v0 should proceed the child with v1 because it's added + // earlier, so it should have a lower key. + if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) { + t.Fatalf("AddChildDir 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+ + " The response was: %#v", resp) + } + + // Creating a child under a nonexistent directory should succeed. + // The directory should be created. 
+ resp, err = c.AddChildDir("nonexistentDir", 5) + if err != nil { + t.Fatal(err) + } +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/client.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go similarity index 98% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/client.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go index d5c28d5e47..f6ae548617 100644 --- a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/client.go +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go @@ -231,6 +231,11 @@ func (c *Client) SetConsistency(consistency string) error { return nil } +// Sets the DialTimeout value +func (c *Client) SetDialTimeout(d time.Duration) { + c.config.DialTimeout = d +} + // AddRootCA adds a root CA cert for the etcd client func (c *Client) AddRootCA(caCert string) error { if c.httpClient == nil { diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go new file mode 100644 index 0000000000..c245e47984 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go @@ -0,0 +1,96 @@ +package etcd + +import ( + "encoding/json" + "fmt" + "net" + "net/url" + "os" + "testing" +) + +// To pass this test, we need to create a cluster of 3 machines +// The server should be listening on 127.0.0.1:4001, 4002, 4003 +func TestSync(t *testing.T) { + fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003") + + // Explicit trailing slash to ensure this doesn't reproduce: + // https://github.com/coreos/go-etcd/issues/82 + c := NewClient([]string{"http://127.0.0.1:4001/"}) + + success := c.SyncCluster() + if !success { + t.Fatal("cannot sync machines") + } + + for _, m := range c.GetCluster() { + u, err := url.Parse(m) + if err != nil { + t.Fatal(err) + } + if u.Scheme != "http" { + t.Fatal("scheme must be http") + } + + host, _, err := net.SplitHostPort(u.Host) + if err != nil { + t.Fatal(err) + } + if host != "127.0.0.1" { + t.Fatal("Host must be 127.0.0.1") + } + } + + badMachines := []string{"abc", "edef"} + + success = c.SetCluster(badMachines) + + if success { + t.Fatal("should not sync on bad machines") + } + + goodMachines := []string{"127.0.0.1:4002"} + + success = c.SetCluster(goodMachines) + + if !success { + t.Fatal("cannot sync machines") + } else { + fmt.Println(c.cluster.Machines) + } + +} + +func TestPersistence(t *testing.T) { + c := NewClient(nil) + c.SyncCluster() + + fo, err := os.Create("config.json") + if err != nil { + t.Fatal(err) + } + defer func() { + if err := fo.Close(); err != nil { + panic(err) + } + }() + + c.SetPersistence(fo) + err = c.saveConfig() + if err != nil { + t.Fatal(err) + } + + c2, err := NewClientFromFile("config.json") + if err != nil { + t.Fatal(err) + } + + // Verify that the two clients have the same config + b1, _ := json.Marshal(c) + b2, _ := json.Marshal(c2) + + if string(b1) != string(b2) { + t.Fatalf("The two configs should be equal!") + } +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/cluster.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/cluster.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go 
b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go new file mode 100644 index 0000000000..223e50f291 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go @@ -0,0 +1,46 @@ +package etcd + +import ( + "testing" +) + +func TestCompareAndDelete(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("foo", true) + }() + + c.Set("foo", "bar", 5) + + // This should succeed an correct prevValue + resp, err := c.CompareAndDelete("foo", "bar", 0) + if err != nil { + t.Fatal(err) + } + if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) { + t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp) + } + + resp, _ = c.Set("foo", "bar", 5) + // This should fail because it gives an incorrect prevValue + _, err = c.CompareAndDelete("foo", "xxx", 0) + if err == nil { + t.Fatalf("CompareAndDelete 2 should have failed. The response is: %#v", resp) + } + + // This should succeed because it gives an correct prevIndex + resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex) + if err != nil { + t.Fatal(err) + } + if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) { + t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp) + } + + c.Set("foo", "bar", 5) + // This should fail because it gives an incorrect prevIndex + resp, err = c.CompareAndDelete("foo", "", 29817514) + if err == nil { + t.Fatalf("CompareAndDelete 4 should have failed. The response is: %#v", resp) + } +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go new file mode 100644 index 0000000000..14a1b00f5a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go @@ -0,0 +1,57 @@ +package etcd + +import ( + "testing" +) + +func TestCompareAndSwap(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("foo", true) + }() + + c.Set("foo", "bar", 5) + + // This should succeed + resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0) + if err != nil { + t.Fatal(err) + } + if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) { + t.Fatalf("CompareAndSwap 1 failed: %#v", resp) + } + + if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) { + t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp) + } + + // This should fail because it gives an incorrect prevValue + resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0) + if err == nil { + t.Fatalf("CompareAndSwap 2 should have failed. 
The response is: %#v", resp) + } + + resp, err = c.Set("foo", "bar", 5) + if err != nil { + t.Fatal(err) + } + + // This should succeed + resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex) + if err != nil { + t.Fatal(err) + } + if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) { + t.Fatalf("CompareAndSwap 3 failed: %#v", resp) + } + + if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) { + t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp) + } + + // This should fail because it gives an incorrect prevIndex + resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514) + if err == nil { + t.Fatalf("CompareAndSwap 4 should have failed. The response is: %#v", resp) + } +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/debug.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go similarity index 83% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/debug.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go index 5c12d178d7..0f777886ba 100644 --- a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/debug.go +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go @@ -22,12 +22,12 @@ type etcdLogger struct { } func (p *etcdLogger) Debug(args ...interface{}) { - msg := "DEBUG: " + fmt.Sprint(args) + msg := "DEBUG: " + fmt.Sprint(args...) p.log.Println(msg) } func (p *etcdLogger) Debugf(f string, args ...interface{}) { - msg := "DEBUG: " + fmt.Sprintf(f, args) + msg := "DEBUG: " + fmt.Sprintf(f, args...) // Append newline if necessary if !strings.HasSuffix(msg, "\n") { msg = msg + "\n" @@ -36,12 +36,12 @@ func (p *etcdLogger) Debugf(f string, args ...interface{}) { } func (p *etcdLogger) Warning(args ...interface{}) { - msg := "WARNING: " + fmt.Sprint(args) + msg := "WARNING: " + fmt.Sprint(args...) p.log.Println(msg) } func (p *etcdLogger) Warningf(f string, args ...interface{}) { - msg := "WARNING: " + fmt.Sprintf(f, args) + msg := "WARNING: " + fmt.Sprintf(f, args...) 
// Append newline if necessary if !strings.HasSuffix(msg, "\n") { msg = msg + "\n" diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go new file mode 100644 index 0000000000..97f6d1110b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go @@ -0,0 +1,28 @@ +package etcd + +import ( + "testing" +) + +type Foo struct{} +type Bar struct { + one string + two int +} + +// Tests that logs don't panic with arbitrary interfaces +func TestDebug(t *testing.T) { + f := &Foo{} + b := &Bar{"asfd", 3} + for _, test := range []interface{}{ + 1234, + "asdf", + f, + b, + } { + logger.Debug(test) + logger.Debugf("something, %s", test) + logger.Warning(test) + logger.Warningf("something, %s", test) + } +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/delete.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/delete.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go new file mode 100644 index 0000000000..5904971556 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go @@ -0,0 +1,81 @@ +package etcd + +import ( + "testing" +) + +func TestDelete(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("foo", true) + }() + + c.Set("foo", "bar", 5) + resp, err := c.Delete("foo", false) + if err != nil { + t.Fatal(err) + } + + if !(resp.Node.Value == "") { + t.Fatalf("Delete failed with %s", resp.Node.Value) + } + + if !(resp.PrevNode.Value == "bar") { + t.Fatalf("Delete PrevNode failed with %s", resp.Node.Value) + } + + resp, err = c.Delete("foo", false) + if err == nil { + t.Fatalf("Delete should have failed because the key foo did not exist. "+ + "The response was: %v", resp) + } +} + +func TestDeleteAll(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("foo", true) + c.Delete("fooDir", true) + }() + + c.SetDir("foo", 5) + // test delete an empty dir + resp, err := c.DeleteDir("foo") + if err != nil { + t.Fatal(err) + } + + if !(resp.Node.Value == "") { + t.Fatalf("DeleteAll 1 failed: %#v", resp) + } + + if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") { + t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp) + } + + c.CreateDir("fooDir", 5) + c.Set("fooDir/foo", "bar", 5) + _, err = c.DeleteDir("fooDir") + if err == nil { + t.Fatal("should not able to delete a non-empty dir with deletedir") + } + + resp, err = c.Delete("fooDir", true) + if err != nil { + t.Fatal(err) + } + + if !(resp.Node.Value == "") { + t.Fatalf("DeleteAll 2 failed: %#v", resp) + } + + if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") { + t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp) + } + + resp, err = c.Delete("foo", true) + if err == nil { + t.Fatalf("DeleteAll should have failed because the key foo did not exist. 
"+ + "The response was: %v", resp) + } +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/error.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/error.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/get.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/get.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go new file mode 100644 index 0000000000..279c4e26f8 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go @@ -0,0 +1,131 @@ +package etcd + +import ( + "reflect" + "testing" +) + +// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node. +func cleanNode(n *Node) { + n.Expiration = nil + n.ModifiedIndex = 0 + n.CreatedIndex = 0 +} + +// cleanResult scrubs a result object two levels deep of Expiration, +// ModifiedIndex and CreatedIndex. +func cleanResult(result *Response) { + // TODO(philips): make this recursive. + cleanNode(result.Node) + for i, _ := range result.Node.Nodes { + cleanNode(result.Node.Nodes[i]) + for j, _ := range result.Node.Nodes[i].Nodes { + cleanNode(result.Node.Nodes[i].Nodes[j]) + } + } +} + +func TestGet(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("foo", true) + }() + + c.Set("foo", "bar", 5) + + result, err := c.Get("foo", false, false) + + if err != nil { + t.Fatal(err) + } + + if result.Node.Key != "/foo" || result.Node.Value != "bar" { + t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL) + } + + result, err = c.Get("goo", false, false) + if err == nil { + t.Fatalf("should not be able to get non-exist key") + } +} + +func TestGetAll(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("fooDir", true) + }() + + c.CreateDir("fooDir", 5) + c.Set("fooDir/k0", "v0", 5) + c.Set("fooDir/k1", "v1", 5) + + // Return kv-pairs in sorted order + result, err := c.Get("fooDir", true, false) + + if err != nil { + t.Fatal(err) + } + + expected := Nodes{ + &Node{ + Key: "/fooDir/k0", + Value: "v0", + TTL: 5, + }, + &Node{ + Key: "/fooDir/k1", + Value: "v1", + TTL: 5, + }, + } + + cleanResult(result) + + if !reflect.DeepEqual(result.Node.Nodes, expected) { + t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected) + } + + // Test the `recursive` option + c.CreateDir("fooDir/childDir", 5) + c.Set("fooDir/childDir/k2", "v2", 5) + + // Return kv-pairs in sorted order + result, err = c.Get("fooDir", true, true) + + cleanResult(result) + + if err != nil { + t.Fatal(err) + } + + expected = Nodes{ + &Node{ + Key: "/fooDir/childDir", + Dir: true, + Nodes: Nodes{ + &Node{ + Key: "/fooDir/childDir/k2", + Value: "v2", + TTL: 5, + }, + }, + TTL: 5, + }, + &Node{ + Key: "/fooDir/k0", + Value: "v0", + TTL: 5, + }, + &Node{ + Key: "/fooDir/k1", + Value: "v1", + TTL: 5, + }, + } + + cleanResult(result) + + if !reflect.DeepEqual(result.Node.Nodes, expected) { + t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected) + } +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/options.go 
b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/options.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/requests.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go similarity index 98% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/requests.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go index 692623e288..5d8b45a2d3 100644 --- a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/requests.go +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go @@ -268,6 +268,12 @@ func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) { logger.Debug("recv.success.", httpPath) break } + // ReadAll error may be caused due to cancel request + select { + case <-cancelled: + return nil, ErrRequestCancelled + default: + } } // if resp is TemporaryRedirect, set the new leader and retry diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/response.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/response.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go new file mode 100644 index 0000000000..756e317815 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go @@ -0,0 +1,42 @@ +package etcd + +import ( + "fmt" + "testing" +) + +func TestSetCurlChan(t *testing.T) { + c := NewClient(nil) + c.OpenCURL() + + defer func() { + c.Delete("foo", true) + }() + + _, err := c.Set("foo", "bar", 5) + if err != nil { + t.Fatal(err) + } + + expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5", + c.cluster.Leader) + actual := c.RecvCURL() + if expected != actual { + t.Fatalf(`Command "%s" is not equal to expected value "%s"`, + actual, expected) + } + + c.SetConsistency(STRONG_CONSISTENCY) + _, err = c.Get("foo", false, false) + if err != nil { + t.Fatal(err) + } + + expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?consistent=true&recursive=false&sorted=false", + c.cluster.Leader) + actual = c.RecvCURL() + if expected != actual { + t.Fatalf(`Command "%s" is not equal to expected value "%s"`, + actual, expected) + } +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/set_update_create.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/set_update_create.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go new file mode 100644 index 0000000000..ced0f06e7b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go @@ -0,0 +1,241 @@ +package etcd + +import ( + "testing" +) + +func TestSet(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("foo", true) + }() + + resp, err := c.Set("foo", "bar", 5) + if err != nil { + t.Fatal(err) + } 
+ if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 { + t.Fatalf("Set 1 failed: %#v", resp) + } + if resp.PrevNode != nil { + t.Fatalf("Set 1 PrevNode failed: %#v", resp) + } + + resp, err = c.Set("foo", "bar2", 5) + if err != nil { + t.Fatal(err) + } + if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) { + t.Fatalf("Set 2 failed: %#v", resp) + } + if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 { + t.Fatalf("Set 2 PrevNode failed: %#v", resp) + } +} + +func TestUpdate(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("foo", true) + c.Delete("nonexistent", true) + }() + + resp, err := c.Set("foo", "bar", 5) + + if err != nil { + t.Fatal(err) + } + + // This should succeed. + resp, err = c.Update("foo", "wakawaka", 5) + if err != nil { + t.Fatal(err) + } + + if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) { + t.Fatalf("Update 1 failed: %#v", resp) + } + if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) { + t.Fatalf("Update 1 prevValue failed: %#v", resp) + } + + // This should fail because the key does not exist. + resp, err = c.Update("nonexistent", "whatever", 5) + if err == nil { + t.Fatalf("The key %v did not exist, so the update should have failed."+ + "The response was: %#v", resp.Node.Key, resp) + } +} + +func TestCreate(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("newKey", true) + }() + + newKey := "/newKey" + newValue := "/newValue" + + // This should succeed + resp, err := c.Create(newKey, newValue, 5) + if err != nil { + t.Fatal(err) + } + + if !(resp.Action == "create" && resp.Node.Key == newKey && + resp.Node.Value == newValue && resp.Node.TTL == 5) { + t.Fatalf("Create 1 failed: %#v", resp) + } + if resp.PrevNode != nil { + t.Fatalf("Create 1 PrevNode failed: %#v", resp) + } + + // This should fail, because the key is already there + resp, err = c.Create(newKey, newValue, 5) + if err == nil { + t.Fatalf("The key %v did exist, so the creation should have failed."+ + "The response was: %#v", resp.Node.Key, resp) + } +} + +func TestCreateInOrder(t *testing.T) { + c := NewClient(nil) + dir := "/queue" + defer func() { + c.DeleteDir(dir) + }() + + var firstKey, secondKey string + + resp, err := c.CreateInOrder(dir, "1", 5) + if err != nil { + t.Fatal(err) + } + + if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) { + t.Fatalf("Create 1 failed: %#v", resp) + } + + firstKey = resp.Node.Key + + resp, err = c.CreateInOrder(dir, "2", 5) + if err != nil { + t.Fatal(err) + } + + if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) { + t.Fatalf("Create 2 failed: %#v", resp) + } + + secondKey = resp.Node.Key + + if firstKey >= secondKey { + t.Fatalf("Expected first key to be greater than second key, but %s is not greater than %s", + firstKey, secondKey) + } +} + +func TestSetDir(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("foo", true) + c.Delete("fooDir", true) + }() + + resp, err := c.CreateDir("fooDir", 5) + if err != nil { + t.Fatal(err) + } + if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) { + t.Fatalf("SetDir 1 failed: %#v", resp) + } + if resp.PrevNode != nil { + t.Fatalf("SetDir 1 PrevNode failed: %#v", resp) + } + + // This should fail because /fooDir already points to a directory + resp, err = c.CreateDir("/fooDir", 5) + if err == nil { + t.Fatalf("fooDir already points to a 
directory, so SetDir should have failed."+ + "The response was: %#v", resp) + } + + _, err = c.Set("foo", "bar", 5) + if err != nil { + t.Fatal(err) + } + + // This should succeed + // It should replace the key + resp, err = c.SetDir("foo", 5) + if err != nil { + t.Fatal(err) + } + if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) { + t.Fatalf("SetDir 2 failed: %#v", resp) + } + if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) { + t.Fatalf("SetDir 2 failed: %#v", resp) + } +} + +func TestUpdateDir(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("fooDir", true) + }() + + resp, err := c.CreateDir("fooDir", 5) + if err != nil { + t.Fatal(err) + } + + // This should succeed. + resp, err = c.UpdateDir("fooDir", 5) + if err != nil { + t.Fatal(err) + } + + if !(resp.Action == "update" && resp.Node.Key == "/fooDir" && + resp.Node.Value == "" && resp.Node.TTL == 5) { + t.Fatalf("UpdateDir 1 failed: %#v", resp) + } + if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) { + t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp) + } + + // This should fail because the key does not exist. + resp, err = c.UpdateDir("nonexistentDir", 5) + if err == nil { + t.Fatalf("The key %v did not exist, so the update should have failed."+ + "The response was: %#v", resp.Node.Key, resp) + } +} + +func TestCreateDir(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("fooDir", true) + }() + + // This should succeed + resp, err := c.CreateDir("fooDir", 5) + if err != nil { + t.Fatal(err) + } + + if !(resp.Action == "create" && resp.Node.Key == "/fooDir" && + resp.Node.Value == "" && resp.Node.TTL == 5) { + t.Fatalf("CreateDir 1 failed: %#v", resp) + } + if resp.PrevNode != nil { + t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp) + } + + // This should fail, because the key is already there + resp, err = c.CreateDir("fooDir", 5) + if err == nil { + t.Fatalf("The key %v did exist, so the creation should have failed."+ + "The response was: %#v", resp.Node.Key, resp) + } +} diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/version.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/version.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/etcd/watch.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go similarity index 100% rename from tests/_vendor/src/github.com/coreos/go-etcd/etcd/watch.go rename to tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go diff --git a/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go new file mode 100644 index 0000000000..43e1dfeb81 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go @@ -0,0 +1,119 @@ +package etcd + +import ( + "fmt" + "runtime" + "testing" + "time" +) + +func TestWatch(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("watch_foo", true) + }() + + go setHelper("watch_foo", "bar", c) + + resp, err := c.Watch("watch_foo", 0, false, nil, nil) + if err != nil { + t.Fatal(err) + } + if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") { + t.Fatalf("Watch 1 failed: %#v", resp) + } + + go setHelper("watch_foo", "bar", c) + + resp, err = 
c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil) + if err != nil { + t.Fatal(err) + } + if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") { + t.Fatalf("Watch 2 failed: %#v", resp) + } + + routineNum := runtime.NumGoroutine() + + ch := make(chan *Response, 10) + stop := make(chan bool, 1) + + go setLoop("watch_foo", "bar", c) + + go receiver(ch, stop) + + _, err = c.Watch("watch_foo", 0, false, ch, stop) + if err != ErrWatchStoppedByUser { + t.Fatalf("Watch returned a non-user stop error") + } + + if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum { + t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum) + } +} + +func TestWatchAll(t *testing.T) { + c := NewClient(nil) + defer func() { + c.Delete("watch_foo", true) + }() + + go setHelper("watch_foo/foo", "bar", c) + + resp, err := c.Watch("watch_foo", 0, true, nil, nil) + if err != nil { + t.Fatal(err) + } + if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") { + t.Fatalf("WatchAll 1 failed: %#v", resp) + } + + go setHelper("watch_foo/foo", "bar", c) + + resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil) + if err != nil { + t.Fatal(err) + } + if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") { + t.Fatalf("WatchAll 2 failed: %#v", resp) + } + + ch := make(chan *Response, 10) + stop := make(chan bool, 1) + + routineNum := runtime.NumGoroutine() + + go setLoop("watch_foo/foo", "bar", c) + + go receiver(ch, stop) + + _, err = c.Watch("watch_foo", 0, true, ch, stop) + if err != ErrWatchStoppedByUser { + t.Fatalf("Watch returned a non-user stop error") + } + + if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum { + t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum) + } +} + +func setHelper(key, value string, c *Client) { + time.Sleep(time.Second) + c.Set(key, value, 100) +} + +func setLoop(key, value string, c *Client) { + time.Sleep(time.Second) + for i := 0; i < 10; i++ { + newValue := fmt.Sprintf("%s_%v", value, i) + c.Set(key, newValue, 100) + time.Sleep(time.Second / 10) + } +} + +func receiver(c chan *Response, stop chan bool) { + for i := 0; i < 10; i++ { + <-c + } + stop <- true +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/api/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/api/MAINTAINERS new file mode 100644 index 0000000000..e0f18f14f1 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/api/MAINTAINERS @@ -0,0 +1 @@ +Victor Vieux (@vieux) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/api/README.md b/tests/Godeps/_workspace/src/github.com/docker/docker/api/README.md new file mode 100644 index 0000000000..453f61a1a1 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/api/README.md @@ -0,0 +1,5 @@ +This directory contains code pertaining to the Docker API: + + - Used by the docker client when communicating with the docker daemon + + - Used by third party tools wishing to interface with the docker daemon diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/api/api_unit_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/api/api_unit_test.go new file mode 100644 index 0000000000..678331d369 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/api/api_unit_test.go @@ -0,0 +1,19 @@ +package api + +import ( + "testing" +) + +func TestJsonContentType(t *testing.T) { + if 
!MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/api/client/cli.go b/tests/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/api/client/cli.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go diff --git a/tests/_vendor/src/github.com/docker/docker/api/client/commands.go b/tests/Godeps/_workspace/src/github.com/docker/docker/api/client/commands.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/api/client/commands.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/api/client/commands.go diff --git a/tests/_vendor/src/github.com/docker/docker/api/client/hijack.go b/tests/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/api/client/hijack.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go diff --git a/tests/_vendor/src/github.com/docker/docker/api/client/utils.go b/tests/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/api/client/utils.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go diff --git a/tests/_vendor/src/github.com/docker/docker/api/common.go b/tests/Godeps/_workspace/src/github.com/docker/docker/api/common.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/api/common.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/api/common.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/api/server/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/api/server/MAINTAINERS new file mode 100644 index 0000000000..c92a061143 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/api/server/MAINTAINERS @@ -0,0 +1,2 @@ +Victor Vieux (@vieux) +Johan Euphrosine (@proppy) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go b/tests/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go new file mode 100644 index 0000000000..897dd6142f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go @@ -0,0 +1,1530 @@ +package server + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "expvar" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/pprof" + "os" + "strconv" + "strings" + "syscall" + + "code.google.com/p/go.net/websocket" + "github.com/docker/libcontainer/user" + "github.com/gorilla/mux" + + "github.com/docker/docker/api" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/listenbuffer" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/systemd" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +var ( + activationLock chan struct{} +) + +type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +func 
hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// Check to make sure request's Content-Type is application/json +func checkForJson(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +//If we don't do this, POST method without Content-type (even with empty body) will fail +func parseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func parseMultipartForm(r *http.Request) error { + if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func httpError(w http.ResponseWriter, err error) { + statusCode := http.StatusInternalServerError + // FIXME: this is brittle and should not be necessary. + // If we need to differentiate between different possible error types, we should + // create appropriate error types with clearly defined meaning. + if strings.Contains(err.Error(), "No such") { + statusCode = http.StatusNotFound + } else if strings.Contains(err.Error(), "Bad parameter") { + statusCode = http.StatusBadRequest + } else if strings.Contains(err.Error(), "Conflict") { + statusCode = http.StatusConflict + } else if strings.Contains(err.Error(), "Impossible") { + statusCode = http.StatusNotAcceptable + } else if strings.Contains(err.Error(), "Wrong login/password") { + statusCode = http.StatusUnauthorized + } else if strings.Contains(err.Error(), "hasn't been activated") { + statusCode = http.StatusForbidden + } + + if err != nil { + log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error()) + http.Error(w, err.Error(), statusCode) + } +} + +func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + return v.Encode(w) +} + +func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { + w.Header().Set("Content-Type", "application/json") + if flush { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } else { + job.Stdout.Add(w) + } +} + +func getBoolParam(value string) (bool, error) { + if value == "" { + return false, nil + } + ret, err := strconv.ParseBool(value) + if err != nil { + return false, fmt.Errorf("Bad parameter") + } + return ret, nil +} + +func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfig, err = ioutil.ReadAll(r.Body) + job = eng.Job("auth") + stdoutBuffer = bytes.NewBuffer(nil) + ) + if err != nil { + return err + } + job.Setenv("authConfig", string(authConfig)) + job.Stdout.Add(stdoutBuffer) + if err = job.Run(); err != nil { + return err + } + if status := engine.Tail(stdoutBuffer, 1); status != "" { + var env engine.Env + env.Set("Status", status) + return writeJSON(w, http.StatusOK, env) + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func getVersion(eng *engine.Engine, version 
version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Content-Type", "application/json") + eng.ServeHTTP(w, r) + return nil +} + +func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("kill", vars["name"]) + if sig := r.Form.Get("signal"); sig != "" { + job.Args = append(job.Args, sig) + } + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("pause", vars["name"]) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("unpause", vars["name"]) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("export", vars["name"]) + job.Stdout.Add(w) + if err := job.Run(); err != nil { + return err + } + return nil +} + +func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + err error + outs *engine.Table + job = eng.Job("images") + ) + + job.Setenv("filters", r.Form.Get("filters")) + // FIXME this parameter could just be a match filter + job.Setenv("filter", r.Form.Get("filter")) + job.Setenv("all", r.Form.Get("all")) + + if version.GreaterThanOrEqualTo("1.7") { + streamJSON(job, w, false) + } else if outs, err = job.Stdout.AddListTable(); err != nil { + return err + } + + if err := job.Run(); err != nil { + return err + } + + if version.LessThan("1.7") && outs != nil { // Convert to legacy format + outsLegacy := engine.NewTable("Created", 0) + for _, out := range outs.Data { + for _, repoTag := range out.GetList("RepoTags") { + repo, tag := parsers.ParseRepositoryTag(repoTag) + outLegacy := &engine.Env{} + outLegacy.Set("Repository", repo) + outLegacy.SetJson("Tag", tag) + outLegacy.Set("Id", out.Get("Id")) + outLegacy.SetInt64("Created", out.GetInt64("Created")) + outLegacy.SetInt64("Size", out.GetInt64("Size")) + outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) + outsLegacy.Add(outLegacy) + } + } + w.Header().Set("Content-Type", "application/json") + if _, err := outsLegacy.WriteListTo(w); err != nil { + return err + } + } + return nil +} + +func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.GreaterThan("1.6") { + w.WriteHeader(http.StatusNotFound) + return fmt.Errorf("This is now implemented in the client.") + } + eng.ServeHTTP(w, r) + 
return nil +} + +func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Content-Type", "application/json") + eng.ServeHTTP(w, r) + return nil +} + +func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var job = eng.Job("events") + streamJSON(job, w, true) + job.Setenv("since", r.Form.Get("since")) + job.Setenv("until", r.Form.Get("until")) + return job.Run() +} + +func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var job = eng.Job("history", vars["name"]) + streamJSON(job, w, false) + + if err := job.Run(); err != nil { + return err + } + return nil +} + +func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("container_changes", vars["name"]) + streamJSON(job, w, false) + + return job.Run() +} + +func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.LessThan("1.4") { + return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) + streamJSON(job, w, false) + return job.Run() +} + +func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + err error + outs *engine.Table + job = eng.Job("containers") + ) + + job.Setenv("all", r.Form.Get("all")) + job.Setenv("size", r.Form.Get("size")) + job.Setenv("since", r.Form.Get("since")) + job.Setenv("before", r.Form.Get("before")) + job.Setenv("limit", r.Form.Get("limit")) + job.Setenv("filters", r.Form.Get("filters")) + + if version.GreaterThanOrEqualTo("1.5") { + streamJSON(job, w, false) + } else if outs, err = job.Stdout.AddTable(); err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + if version.LessThan("1.5") { // Convert to legacy format + for _, out := range outs.Data { + ports := engine.NewTable("", 0) + ports.ReadListFrom([]byte(out.Get("Ports"))) + out.Set("Ports", api.DisplayablePorts(ports)) + } + w.Header().Set("Content-Type", "application/json") + if _, err = outs.WriteListTo(w); err != nil { + return err + } + } + return nil +} + +func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var ( + inspectJob = eng.Job("container_inspect", vars["name"]) + logsJob = eng.Job("logs", vars["name"]) + c, err = inspectJob.Stdout.AddEnv() + ) + if err != nil { + return err + } + logsJob.Setenv("follow", r.Form.Get("follow")) + logsJob.Setenv("tail", r.Form.Get("tail")) + logsJob.Setenv("stdout", r.Form.Get("stdout")) + logsJob.Setenv("stderr", r.Form.Get("stderr")) + logsJob.Setenv("timestamps", 
r.Form.Get("timestamps")) + // Validate args here, because we can't return not StatusOK after job.Run() call + stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + if err = inspectJob.Run(); err != nil { + return err + } + + var outStream, errStream io.Writer + outStream = utils.NewWriteFlusher(w) + + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + + logsJob.Stdout.Add(outStream) + logsJob.Stderr.Set(errStream) + if err := logsJob.Run(); err != nil { + fmt.Fprintf(outStream, "Error running logs job: %s\n", err) + } + return nil +} + +func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) + job.Setenv("force", r.Form.Get("force")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + config engine.Env + env engine.Env + job = eng.Job("commit", r.Form.Get("container")) + stdoutBuffer = bytes.NewBuffer(nil) + ) + + if err := checkForJson(r); err != nil { + return err + } + + if err := config.Decode(r.Body); err != nil { + log.Errorf("%s", err) + } + + if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { + job.Setenv("pause", "1") + } else { + job.Setenv("pause", r.FormValue("pause")) + } + + job.Setenv("repo", r.Form.Get("repo")) + job.Setenv("tag", r.Form.Get("tag")) + job.Setenv("author", r.Form.Get("author")) + job.Setenv("comment", r.Form.Get("comment")) + job.SetenvSubEnv("config", &config) + + job.Stdout.Add(stdoutBuffer) + if err := job.Run(); err != nil { + return err + } + env.Set("Id", engine.Tail(stdoutBuffer, 1)) + return writeJSON(w, http.StatusCreated, env) +} + +// Creates an image from Pull or from Import +func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + job *engine.Job + ) + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := ®istry.AuthConfig{} + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + if image != "" { //pull + if tag == "" { + image, tag = parsers.ParseRepositoryTag(image) + } + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + job = eng.Job("pull", image, tag) + job.SetenvBool("parallel", version.GreaterThan("1.3")) + 
job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + } else { //import + if tag == "" { + repo, tag = parsers.ParseRepositoryTag(repo) + } + job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag) + job.Stdin.Add(r.Body) + } + + if version.GreaterThan("1.0") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) + w.Write(sf.FormatError(err)) + } + + return nil +} + +func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = ®istry.AuthConfig{} + metaHeaders = map[string][]string{} + ) + + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + var job = eng.Job("search", r.Form.Get("term")) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + streamJSON(job, w, false) + + return job.Run() +} + +func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := parseForm(r); err != nil { + return err + } + authConfig := ®istry.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return err + } + } + + job := eng.Job("push", vars["name"]) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + job.Setenv("tag", r.Form.Get("tag")) + if version.GreaterThan("1.0") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) + w.Write(sf.FormatError(err)) + } + return nil +} + +func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + if version.GreaterThan("1.0") { + w.Header().Set("Content-Type", "application/x-tar") + } + var job *engine.Job + if name, ok := vars["name"]; ok { + job = 
eng.Job("image_export", name) + } else { + job = eng.Job("image_export", r.Form["names"]...) + } + job.Stdout.Add(w) + return job.Run() +} + +func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + job := eng.Job("load") + job.Stdin.Add(r.Body) + return job.Run() +} + +func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return nil + } + var ( + out engine.Env + job = eng.Job("create", r.Form.Get("name")) + outWarnings []string + stdoutBuffer = bytes.NewBuffer(nil) + warnings = bytes.NewBuffer(nil) + ) + + if err := checkForJson(r); err != nil { + return err + } + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + // Read container ID from the first line of stdout + job.Stdout.Add(stdoutBuffer) + // Read warnings from stderr + job.Stderr.Add(warnings) + if err := job.Run(); err != nil { + return err + } + // Parse warnings from stderr + scanner := bufio.NewScanner(warnings) + for scanner.Scan() { + outWarnings = append(outWarnings, scanner.Text()) + } + out.Set("Id", engine.Tail(stdoutBuffer, 1)) + out.SetList("Warnings", outWarnings) + + return writeJSON(w, http.StatusCreated, out) +} + +func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("restart", vars["name"]) + job.Setenv("t", r.Form.Get("t")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("rm", vars["name"]) + + job.Setenv("forceRemove", r.Form.Get("force")) + + job.Setenv("removeVolume", r.Form.Get("v")) + job.Setenv("removeLink", r.Form.Get("link")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("image_delete", vars["name"]) + streamJSON(job, w, false) + job.Setenv("force", r.Form.Get("force")) + job.Setenv("noprune", r.Form.Get("noprune")) + + return job.Run() +} + +func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var ( + name = vars["name"] + job = eng.Job("start", name) + ) + + // If contentLength is -1, we can assumed chunked encoding + // or more technically that the length is unknown + // http://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { + if err := checkForJson(r); err != nil { + return err + } + + if err := 
job.DecodeEnv(r.Body); err != nil { + return err + } + } + + if err := job.Run(); err != nil { + if err.Error() == "Container already started" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("stop", vars["name"]) + job.Setenv("t", r.Form.Get("t")) + if err := job.Run(); err != nil { + if err.Error() == "Container already stopped" { + w.WriteHeader(http.StatusNotModified) + return nil + } + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var ( + env engine.Env + stdoutBuffer = bytes.NewBuffer(nil) + job = eng.Job("wait", vars["name"]) + ) + job.Stdout.Add(stdoutBuffer) + if err := job.Run(); err != nil { + return err + } + + env.Set("StatusCode", engine.Tail(stdoutBuffer, 1)) + return writeJSON(w, http.StatusOK, env) +} + +func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + return err + } + return nil +} + +func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var ( + job = eng.Job("container_inspect", vars["name"]) + c, err = job.Stdout.AddEnv() + ) + if err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + defer func() { + if tcpc, ok := inStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else { + inStream.Close() + } + }() + defer func() { + if tcpc, ok := outStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else if closer, ok := outStream.(io.Closer); ok { + closer.Close() + } + }() + + var errStream io.Writer + + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + + job = eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(inStream) + job.Stdout.Add(outStream) + job.Stderr.Set(errStream) + if err := job.Run(); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + + } + return nil +} + +func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error 
{ + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil { + return err + } + + h := websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + job := eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(ws) + job.Stdout.Add(ws) + job.Stderr.Set(ws) + if err := job.Run(); err != nil { + log.Errorf("Error attaching websocket: %s", err) + } + }) + h.ServeHTTP(w, r) + + return nil +} + +func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("container_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("raw", true) + } + streamJSON(job, w, false) + return job.Run() +} + +func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("image_inspect", vars["name"]) + if version.LessThan("1.12") { + job.SetenvBool("raw", true) + } + streamJSON(job, w, false) + return job.Run() +} + +func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.LessThan("1.3") { + return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = &registry.AuthConfig{} + configFileEncoded = r.Header.Get("X-Registry-Config") + configFile = &registry.ConfigFile{} + job = eng.Job("build") + ) + + // This block can be removed when API versions prior to 1.9 are deprecated. + // Both headers will be parsed and sent along to the daemon, but if a non-empty + // ConfigFile is present, any value provided as an AuthConfig directly will + // be overridden. See BuildFile::CmdFrom for details.
+ if version.LessThan("1.9") && authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + + if configFileEncoded != "" { + configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) + if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + configFile = ®istry.ConfigFile{} + } + } + + if version.GreaterThanOrEqualTo("1.8") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + + if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + job.Setenv("rm", "1") + } else { + job.Setenv("rm", r.FormValue("rm")) + } + job.Stdin.Add(r.Body) + job.Setenv("remote", r.FormValue("remote")) + job.Setenv("t", r.FormValue("t")) + job.Setenv("q", r.FormValue("q")) + job.Setenv("nocache", r.FormValue("nocache")) + job.Setenv("forcerm", r.FormValue("forcerm")) + job.SetenvJson("authConfig", authConfig) + job.SetenvJson("configFile", configFile) + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) + w.Write(sf.FormatError(err)) + } + return nil +} + +func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var copyData engine.Env + + if err := checkForJson(r); err != nil { + return err + } + + if err := copyData.Decode(r.Body); err != nil { + return err + } + + if copyData.Get("Resource") == "" { + return fmt.Errorf("Path cannot be empty") + } + + origResource := copyData.Get("Resource") + + if copyData.Get("Resource")[0] == '/' { + copyData.Set("Resource", copyData.Get("Resource")[1:]) + } + + job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) + job.Stdout.Add(w) + w.Header().Set("Content-Type", "application/x-tar") + if err := job.Run(); err != nil { + log.Errorf("%s", err.Error()) + if strings.Contains(err.Error(), "No such container") { + w.WriteHeader(http.StatusNotFound) + } else if strings.Contains(err.Error(), "no such file or directory") { + return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) + } + } + return nil +} + +func postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return nil + } + var ( + out engine.Env + name = vars["name"] + job = eng.Job("execCreate", name) + stdoutBuffer = bytes.NewBuffer(nil) + ) + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + + job.Stdout.Add(stdoutBuffer) + // Register an instance of Exec in container. 
+ if err := job.Run(); err != nil { + fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err) + return err + } + // Return the ID + out.Set("Id", engine.Tail(stdoutBuffer, 1)) + + return writeJSON(w, http.StatusCreated, out) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. +func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return nil + } + var ( + name = vars["name"] + job = eng.Job("execStart", name) + errOut io.Writer = os.Stderr + ) + + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + if !job.GetenvBool("Detach") { + // Setting up the streaming http interface. + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + + defer func() { + if tcpc, ok := inStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else { + inStream.Close() + } + }() + defer func() { + if tcpc, ok := outStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else if closer, ok := outStream.(io.Closer); ok { + closer.Close() + } + }() + + var errStream io.Writer + + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + job.Stdin.Add(inStream) + job.Stdout.Add(outStream) + job.Stderr.Set(errStream) + errOut = outStream + } + // Now run the user process in container. + job.SetCloseIO(false) + if err := job.Run(); err != nil { + fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err) + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := eng.Job("execResize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + return err + } + return nil +} + +func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} +func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Access-Control-Allow-Origin", "*") + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") + w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") +} + +func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // log the request + log.Debugf("Calling %s %s", localMethod, localRoute) + + if logging { + log.Infof("%s %s", r.Method, r.RequestURI) + } + + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + if len(userAgent) == 2 
&& !dockerVersion.Equal(version.Version(userAgent[1])) { + log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) + } + } + version := version.Version(mux.Vars(r)["version"]) + if version == "" { + version = api.APIVERSION + } + if enableCors { + writeCorsHeaders(w, r) + } + + if version.GreaterThan(api.APIVERSION) { + http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound) + return + } + + if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { + log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err) + httpError(w, err) + } + } +} + +// Replicated from expvar.go as not public. +func expvarHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +func AttachProfiler(router *mux.Router) { + router.HandleFunc("/debug/vars", expvarHandler) + router.HandleFunc("/debug/pprof/", pprof.Index) + router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + router.HandleFunc("/debug/pprof/profile", pprof.Profile) + router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) + router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) { + r := mux.NewRouter() + if os.Getenv("DEBUG") != "" { + AttachProfiler(r) + } + m := map[string]map[string]HttpApiFunc{ + "GET": { + "/_ping": ping, + "/events": getEvents, + "/info": getInfo, + "/version": getVersion, + "/images/json": getImagesJSON, + "/images/viz": getImagesViz, + "/images/search": getImagesSearch, + "/images/get": getImagesGet, + "/images/{name:.*}/get": getImagesGet, + "/images/{name:.*}/history": getImagesHistory, + "/images/{name:.*}/json": getImagesByName, + "/containers/ps": getContainersJSON, + "/containers/json": getContainersJSON, + "/containers/{name:.*}/export": getContainersExport, + "/containers/{name:.*}/changes": getContainersChanges, + "/containers/{name:.*}/json": getContainersByName, + "/containers/{name:.*}/top": getContainersTop, + "/containers/{name:.*}/logs": getContainersLogs, + "/containers/{name:.*}/attach/ws": wsContainersAttach, + }, + "POST": { + "/auth": postAuth, + "/commit": postCommit, + "/build": postBuild, + "/images/create": postImagesCreate, + "/images/load": postImagesLoad, + "/images/{name:.*}/push": postImagesPush, + "/images/{name:.*}/tag": postImagesTag, + "/containers/create": postContainersCreate, + "/containers/{name:.*}/kill": postContainersKill, + "/containers/{name:.*}/pause": postContainersPause, + "/containers/{name:.*}/unpause": postContainersUnpause, + "/containers/{name:.*}/restart": postContainersRestart, + "/containers/{name:.*}/start": postContainersStart, + "/containers/{name:.*}/stop": postContainersStop, + "/containers/{name:.*}/wait": postContainersWait, + "/containers/{name:.*}/resize": postContainersResize, + "/containers/{name:.*}/attach": postContainersAttach, + "/containers/{name:.*}/copy": postContainersCopy, + 
"/containers/{name:.*}/exec": postContainerExecCreate, + "/exec/{name:.*}/start": postContainerExecStart, + "/exec/{name:.*}/resize": postContainerExecResize, + }, + "DELETE": { + "/containers/{name:.*}": deleteContainers, + "/images/{name:.*}": deleteImages, + }, + "OPTIONS": { + "": optionsHandler, + }, + } + + for method, routes := range m { + for route, fct := range routes { + log.Debugf("Registering %s, %s", method, route) + // NOTE: scope issue, make sure the variables are local and won't be changed + localRoute := route + localFct := fct + localMethod := method + + // build the handler function + f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, version.Version(dockerVersion)) + + // add the new route + if localRoute == "" { + r.Methods(localMethod).HandlerFunc(f) + } else { + r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) + r.Path(localRoute).Methods(localMethod).HandlerFunc(f) + } + } + } + + return r, nil +} + +// ServeRequest processes a single http request to the docker remote api. +// FIXME: refactor this to be part of Server and not require re-creating a new +// router each time. This requires first moving ListenAndServe into Server. +func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) error { + router, err := createRouter(eng, false, true, "") + if err != nil { + return err + } + // Insert APIVERSION into the request as a convenience + req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) + router.ServeHTTP(w, req) + return nil +} + +// ServeFD creates an http.Server and sets it up to serve given a socket activated +// argument. +func ServeFd(addr string, handle http.Handler) error { + ls, e := systemd.ListenFD(addr) + if e != nil { + return e + } + + chErrors := make(chan error, len(ls)) + + // We don't want to start serving on these sockets until the + // daemon is initialized and installed. Otherwise required handlers + // won't be ready. + <-activationLock + + // Since ListenFD will return one or more sockets we have + // to create a go func to spawn off multiple serves + for i := range ls { + listener := ls[i] + go func() { + httpSrv := http.Server{Handler: handle} + chErrors <- httpSrv.Serve(listener) + }() + } + + for i := 0; i < len(ls); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +func lookupGidByName(nameOrGid string) (int, error) { + groups, err := user.ParseGroupFilter(func(g *user.Group) bool { + return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid + }) + if err != nil { + return -1, err + } + if groups != nil && len(groups) > 0 { + return groups[0].Gid, nil + } + return -1, fmt.Errorf("Group %s not found", nameOrGid) +} + +func changeGroup(addr string, nameOrGid string) error { + gid, err := lookupGidByName(nameOrGid) + if err != nil { + return err + } + + log.Debugf("%s group found. gid: %d", nameOrGid, gid) + return os.Chown(addr, 0, gid) +} + +// ListenAndServe sets up the required http.Server and gets it listening for +// each addr passed in and does protocol specific checking. 
+func ListenAndServe(proto, addr string, job *engine.Job) error { + var l net.Listener + r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) + if err != nil { + return err + } + + if proto == "fd" { + return ServeFd(addr, r) + } + + if proto == "unix" { + if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { + return err + } + } + + var oldmask int + if proto == "unix" { + oldmask = syscall.Umask(0777) + } + + if job.GetenvBool("BufferRequests") { + l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock) + } else { + l, err = net.Listen(proto, addr) + } + + if proto == "unix" { + syscall.Umask(oldmask) + } + if err != nil { + return err + } + + if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) { + tlsCert := job.Getenv("TlsCert") + tlsKey := job.Getenv("TlsKey") + cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) + if err != nil { + return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", + tlsCert, tlsKey, err) + } + tlsConfig := &tls.Config{ + NextProtos: []string{"http/1.1"}, + Certificates: []tls.Certificate{cert}, + } + if job.GetenvBool("TlsVerify") { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(job.Getenv("TlsCa")) + if err != nil { + return fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + l = tls.NewListener(l, tlsConfig) + } + + // Basic error and sanity checking + switch proto { + case "tcp": + if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { + log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + case "unix": + socketGroup := job.Getenv("SocketGroup") + if socketGroup != "" { + if err := changeGroup(addr, socketGroup); err != nil { + if socketGroup == "docker" { + // if the user hasn't explicitly specified the group ownership, don't fail on errors. + log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) + } else { + return err + } + } + + } + if err := os.Chmod(addr, 0660); err != nil { + return err + } + default: + return fmt.Errorf("Invalid protocol format.") + } + + httpSrv := http.Server{Addr: addr, Handler: r} + return httpSrv.Serve(l) +} + +// ServeApi loops through all of the protocols sent in to docker and spawns +// off a go routine to setup a serving http.Server for each. 
+func ServeApi(job *engine.Job) engine.Status { + if len(job.Args) == 0 { + return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) + } + var ( + protoAddrs = job.Args + chErrors = make(chan error, len(protoAddrs)) + ) + activationLock = make(chan struct{}) + + for _, protoAddr := range protoAddrs { + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) + } + go func() { + log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) + chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job) + }() + } + + for i := 0; i < len(protoAddrs); i++ { + err := <-chErrors + if err != nil { + return job.Error(err) + } + } + + return engine.StatusOK +} + +func AcceptConnections(job *engine.Job) engine.Status { + // Tell the init daemon we are accepting requests + go systemd.SdNotify("READY=1") + + // close the lock so the listeners start accepting connections + if activationLock != nil { + close(activationLock) + } + + return engine.StatusOK +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unit_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unit_test.go new file mode 100644 index 0000000000..519652f377 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unit_test.go @@ -0,0 +1,555 @@ +package server + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/version" +) + +func TestGetBoolParam(t *testing.T) { + if ret, err := getBoolParam("true"); err != nil || !ret { + t.Fatalf("true -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("True"); err != nil || !ret { + t.Fatalf("True -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("1"); err != nil || !ret { + t.Fatalf("1 -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam(""); err != nil || ret { + t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("false"); err != nil || ret { + t.Fatalf("false -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("0"); err != nil || ret { + t.Fatalf("0 -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("faux"); err == nil || ret { + t.Fatalf("faux -> false, err | got %t %s", ret, err) + + } +} + +func TestHttpError(t *testing.T) { + r := httptest.NewRecorder() + + httpError(r, fmt.Errorf("No such method")) + if r.Code != http.StatusNotFound { + t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) + } + + httpError(r, fmt.Errorf("This account hasn't been activated")) + if r.Code != http.StatusForbidden { + t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) + } + + httpError(r, fmt.Errorf("Some error")) + if r.Code != http.StatusInternalServerError { + t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) + } +} + +func TestGetVersion(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("version", func(job *engine.Job) engine.Status { + called = true + v := &engine.Env{} + v.SetJson("Version", "42.1") + v.Set("ApiVersion", "1.1.1.1.1") + v.Set("GoVersion", "2.42") + v.Set("Os", "Linux") + v.Set("Arch", "x86_64") + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return
engine.StatusOK + }) + r := serveRequest("GET", "/version", nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + v := readEnv(r.Body, t) + if v.Get("Version") != "42.1" { + t.Fatalf("%#v\n", v) + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } +} + +func TestGetInfo(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("info", func(job *engine.Job) engine.Status { + called = true + v := &engine.Env{} + v.SetInt("Containers", 1) + v.SetInt("Images", 42000) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/info", nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + v := readEnv(r.Body, t) + if v.GetInt("Images") != 42000 { + t.Fatalf("%#v\n", v) + } + if v.GetInt("Containers") != 1 { + t.Fatalf("%#v\n", v) + } + assertContentType(r, "application/json", t) +} + +func TestGetImagesJSON(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("images", func(job *engine.Job) engine.Status { + called = true + v := createEnvFromGetImagesJSONStruct(sampleImage) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertHttpNotError(r, t) + assertContentType(r, "application/json", t) + var observed getImagesJSONStruct + if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(observed, sampleImage) { + t.Errorf("Expected %#v but got %#v", sampleImage, observed) + } +} + +func TestGetImagesJSONFilter(t *testing.T) { + eng := engine.New() + filter := "nothing" + eng.Register("images", func(job *engine.Job) engine.Status { + filter = job.Getenv("filter") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t) + if filter != "aaaa" { + t.Errorf("%#v", filter) + } +} + +func TestGetImagesJSONFilters(t *testing.T) { + eng := engine.New() + filter := "nothing" + eng.Register("images", func(job *engine.Job) engine.Status { + filter = job.Getenv("filters") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t) + if filter != "nnnn" { + t.Errorf("%#v", filter) + } +} + +func TestGetImagesJSONAll(t *testing.T) { + eng := engine.New() + allFilter := "-1" + eng.Register("images", func(job *engine.Job) engine.Status { + allFilter = job.Getenv("all") + return engine.StatusOK + }) + serveRequest("GET", "/images/json?all=1", nil, eng, t) + if allFilter != "1" { + t.Errorf("%#v", allFilter) + } +} + +func TestGetImagesJSONLegacyFormat(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("images", func(job *engine.Job) engine.Status { + called = true + outsLegacy := engine.NewTable("Created", 0) + outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage)) + if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertHttpNotError(r, t) + assertContentType(r, "application/json", t) + images := engine.NewTable("Created", 0) + if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil { + t.Fatal(err) + } + if images.Len() != 1 { + t.Fatalf("Expected 1 image, %d found", images.Len()) + } + image := images.Data[0] + if 
image.Get("Tag") != "test-tag" { + t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag")) + } + if image.Get("Repository") != "test-name" { + t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository")) + } +} + +func TestGetContainersByName(t *testing.T) { + eng := engine.New() + name := "container_name" + var called bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + called = true + if job.Args[0] != name { + t.Errorf("name != '%s': %#v", name, job.Args[0]) + } + if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { + t.Errorf("dirty env variable not set") + } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { + t.Errorf("dirty env variable set when it shouldn't") + } + v := &engine.Env{} + v.SetBool("dirty", true) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertContentType(r, "application/json", t) + var stdoutJson interface{} + if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { + t.Fatalf("%#v", err) + } + if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { + t.Fatalf("%#v", stdoutJson) + } +} + +func TestGetEvents(t *testing.T) { + eng := engine.New() + var called bool + eng.Register("events", func(job *engine.Job) engine.Status { + called = true + since := job.Getenv("since") + if since != "1" { + t.Fatalf("'since' should be 1, found %#v instead", since) + } + until := job.Getenv("until") + if until != "0" { + t.Fatalf("'until' should be 0, found %#v instead", until) + } + v := &engine.Env{} + v.Set("since", since) + v.Set("until", until) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + assertContentType(r, "application/json", t) + var stdout_json struct { + Since int + Until int + } + if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil { + t.Fatal(err) + } + if stdout_json.Since != 1 { + t.Errorf("since != 1: %#v", stdout_json.Since) + } + if stdout_json.Until != 0 { + t.Errorf("until != 0: %#v", stdout_json.Until) + } +} + +func TestLogs(t *testing.T) { + eng := engine.New() + var inspect bool + var logs bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + inspect = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + return engine.StatusOK + }) + expected := "logs" + eng.Register("logs", func(job *engine.Job) engine.Status { + logs = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + follow := job.Getenv("follow") + if follow != "1" { + t.Fatalf("follow: %s, must be 1", follow) + } + stdout := job.Getenv("stdout") + if stdout != "1" { + t.Fatalf("stdout %s, must be 1", stdout) + } + stderr := job.Getenv("stderr") + if stderr != "" { + t.Fatalf("stderr %s, must be empty", stderr) + } + timestamps := job.Getenv("timestamps") + if timestamps != "1" { + t.Fatalf("timestamps %s, must be 1", timestamps) + } + job.Stdout.Write([]byte(expected)) + return engine.StatusOK + }) + r := serveRequest("GET", 
"/containers/test/logs?follow=1&stdout=1×tamps=1", nil, eng, t) + if r.Code != http.StatusOK { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) + } + if !inspect { + t.Fatal("container_inspect job was not called") + } + if !logs { + t.Fatal("logs job was not called") + } + res := r.Body.String() + if res != expected { + t.Fatalf("Output %s, expected %s", res, expected) + } +} + +func TestLogsNoStreams(t *testing.T) { + eng := engine.New() + var inspect bool + var logs bool + eng.Register("container_inspect", func(job *engine.Job) engine.Status { + inspect = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != "test" { + t.Fatalf("Container name %s, must be test", job.Args[0]) + } + return engine.StatusOK + }) + eng.Register("logs", func(job *engine.Job) engine.Status { + logs = true + return engine.StatusOK + }) + r := serveRequest("GET", "/containers/test/logs", nil, eng, t) + if r.Code != http.StatusBadRequest { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest) + } + if inspect { + t.Fatal("container_inspect job was called, but it shouldn't") + } + if logs { + t.Fatal("logs job was called, but it shouldn't") + } + res := strings.TrimSpace(r.Body.String()) + expected := "Bad parameters: you must choose at least one stream" + if !strings.Contains(res, expected) { + t.Fatalf("Output %s, expected %s in it", res, expected) + } +} + +func TestGetImagesHistory(t *testing.T) { + eng := engine.New() + imageName := "docker-test-image" + var called bool + eng.Register("history", func(job *engine.Job) engine.Status { + called = true + if len(job.Args) == 0 { + t.Fatal("Job arguments is empty") + } + if job.Args[0] != imageName { + t.Fatalf("name != '%s': %#v", imageName, job.Args[0]) + } + v := &engine.Env{} + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + if r.Code != http.StatusOK { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } +} + +func TestGetImagesByName(t *testing.T) { + eng := engine.New() + name := "image_name" + var called bool + eng.Register("image_inspect", func(job *engine.Job) engine.Status { + called = true + if job.Args[0] != name { + t.Fatalf("name != '%s': %#v", name, job.Args[0]) + } + if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { + t.Fatal("dirty env variable not set") + } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { + t.Fatal("dirty env variable set when it shouldn't") + } + v := &engine.Env{} + v.SetBool("dirty", true) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t) + if !called { + t.Fatal("handler was not called") + } + if r.HeaderMap.Get("Content-Type") != "application/json" { + t.Fatalf("%#v\n", r) + } + var stdoutJson interface{} + if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { + t.Fatalf("%#v", err) + } + if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { + t.Fatalf("%#v", stdoutJson) + } +} + +func TestDeleteContainers(t *testing.T) { + eng := engine.New() + name := "foo" + var called bool + eng.Register("rm", func(job *engine.Job) engine.Status { + called = true + if len(job.Args) == 0 { + 
t.Fatalf("Job arguments is empty") + } + if job.Args[0] != name { + t.Fatalf("name != '%s': %#v", name, job.Args[0]) + } + return engine.StatusOK + }) + r := serveRequest("DELETE", "/containers/"+name, nil, eng, t) + if !called { + t.Fatalf("handler was not called") + } + if r.Code != http.StatusNoContent { + t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent) + } +} + +func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { + return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t) +} + +func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { + r := httptest.NewRecorder() + req, err := http.NewRequest(method, target, body) + if err != nil { + t.Fatal(err) + } + if err := ServeRequest(eng, version, r, req); err != nil { + t.Fatal(err) + } + return r +} + +func readEnv(src io.Reader, t *testing.T) *engine.Env { + out := engine.NewOutput() + v, err := out.AddEnv() + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(out, src); err != nil { + t.Fatal(err) + } + out.Close() + return v +} + +func toJson(data interface{}, t *testing.T) io.Reader { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(data); err != nil { + t.Fatal(err) + } + return &buf +} + +func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) { + if recorder.HeaderMap.Get("Content-Type") != content_type { + t.Fatalf("%#v\n", recorder) + } +} + +// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that +// should die as soon as we converted all integration tests? +// assertHttpNotError expect the given response to not have an error. +// Otherwise the it causes the test to fail. +func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) { + // Non-error http status are [200, 400) + if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { + t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) + } +} + +func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env { + v := &engine.Env{} + v.SetList("RepoTags", data.RepoTags) + v.Set("Id", data.Id) + v.SetInt64("Created", data.Created) + v.SetInt64("Size", data.Size) + v.SetInt64("VirtualSize", data.VirtualSize) + return v +} + +type getImagesJSONStruct struct { + RepoTags []string + Id string + Created int64 + Size int64 + VirtualSize int64 +} + +var sampleImage getImagesJSONStruct = getImagesJSONStruct{ + RepoTags: []string{"test-name:test-tag"}, + Id: "ID", + Created: 999, + Size: 777, + VirtualSize: 666, +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go new file mode 100644 index 0000000000..8e3ae0b181 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go @@ -0,0 +1,449 @@ +/* + +aufs driver directory structure + +. 
+├── layers // Metadata of layers +│   ├── 1 +│   ├── 2 +│   └── 3 +├── diff // Content of the layer +│   ├── 1 // Contains layers that need to be mounted for the id +│   ├── 2 +│   └── 3 +└── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "os" + "os/exec" + "path" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" + mountpk "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" + "github.com/docker/libcontainer/label" +) + +var ( + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + incompatibleFsMagic = []graphdriver.FsMagic{ + graphdriver.FsMagicBtrfs, + graphdriver.FsMagicAufs, + } +) + +func init() { + graphdriver.Register("aufs", Init) +} + +type Driver struct { + root string + sync.Mutex // Protects concurrent modification to active + active map[string]int +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. +func Init(root string, options []string) (graphdriver.Driver, error) { + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + rootdir := path.Dir(root) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, fmt.Errorf("Couldn't stat the root directory: %s", err) + } + + for _, magic := range incompatibleFsMagic { + if graphdriver.FsMagic(buf.Type) == magic { + return nil, graphdriver.ErrIncompatibleFS + } + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + active: make(map[string]int), + } + + // Create the root aufs driver dir and return + // if it already exists. + // If not, populate the dir structure. + if err := os.MkdirAll(root, 0755); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := graphdriver.MakePrivate(root); err != nil { + return nil, err + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { + return nil, err + } + } + return a, nil +} + +// Return a nil error if the kernel supports aufs. +// We cannot rely on modprobe because inside dind modprobe fails +// to run. +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a Driver) rootPath() string { + return a.root +} + +func (Driver) String() string { + return "aufs" +} + +func (a Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + } +} + +// Exists returns true if the given id is registered with +// this driver +func (a Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// Three folders are created for each id: +// mnt, layers, and diff +func (a *Driver) Create(id, parent string) error { + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(),
"layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIds(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + return nil +} + +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil { + return err + } + } + return nil +} + +// Unmount and remove the dir information +func (a *Driver) Remove(id string) error { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if a.active[id] != 0 { + log.Errorf("Warning: removing active id %s", id) + } + + // Make sure the dir is umounted first + if err := a.unmount(id); err != nil { + return err + } + tmpDirs := []string{ + "mnt", + "diff", + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. + for _, p := range tmpDirs { + + realPath := path.Join(a.rootPath(), p, id) + tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) + if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpPath) + } + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Return the rootfs path for the id +// This will mount the dir at it's given path +func (a *Driver) Get(id, mountLabel string) (string, error) { + ids, err := getParentIds(a.rootPath(), id) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + ids = []string{} + } + + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + count := a.active[id] + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + out := path.Join(a.rootPath(), "diff", id) + if len(ids) > 0 { + out = path.Join(a.rootPath(), "mnt", id) + + if count == 0 { + if err := a.mount(id, mountLabel); err != nil { + return "", err + } + } + } + + a.active[id] = count + 1 + + return out, nil +} + +func (a *Driver) Put(id string) { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if count := a.active[id]; count > 1 { + a.active[id] = count - 1 + } else { + ids, _ := getParentIds(a.rootPath(), id) + // We only mounted if there are any parents + if ids != nil && len(ids) > 0 { + a.unmount(id) + } + delete(a.active, id) + } +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id, parent string) (archive.Archive, error) { + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + }) +} + +func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error { + return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
+func (a *Driver) DiffSize(id, parent string) (bytes int64, err error) { + // AUFS doesn't need the parent layer to calculate the diff size. + return utils.TreeSize(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) { + // AUFS doesn't need the parent id to apply the diff. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIds(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id, mountLabel string) error { + // If the id is mounted or we get an error return + if mounted, err := a.mounted(id); err != nil || mounted { + return err + } + + var ( + target = path.Join(a.rootPath(), "mnt", id) + rw = path.Join(a.rootPath(), "diff", id) + ) + + layers, err := a.getParentLayerPaths(id) + if err != nil { + return err + } + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return err + } + return nil +} + +func (a *Driver) unmount(id string) error { + if mounted, err := a.mounted(id); err != nil || !mounted { + return err + } + target := path.Join(a.rootPath(), "mnt", id) + return Unmount(target) +} + +func (a *Driver) mounted(id string) (bool, error) { + target := path.Join(a.rootPath(), "mnt", id) + return mountpk.Mounted(target) +} + +// During cleanup aufs needs to unmount all mountpoints +func (a *Driver) Cleanup() error { + ids, err := loadIds(path.Join(a.rootPath(), "layers")) + if err != nil { + return err + } + + for _, id := range ids { + if err := a.unmount(id); err != nil { + log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) + } + } + + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + if err = a.tryMount(ro, rw, target, mountLabel); err != nil { + if err = a.mountRw(rw, target, mountLabel); err != nil { + return + } + + for _, layer := range ro { + data := label.FormatMountLabel(fmt.Sprintf("append:%s=ro+wh", layer), mountLabel) + if err = mount("none", target, "aufs", MsRemount, data); err != nil { + return + } + } + } + return +} + +// Try to mount using the aufs fast path, if this fails then +// append ro layers. 
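+// As an illustration (hypothetical paths), with rw=/aufs/diff/3 and +// ro=[/aufs/diff/2 /aufs/diff/1], the data string built below and passed to +// mount(2) is: +// br:/aufs/diff/3=rw:/aufs/diff/2=ro+wh:/aufs/diff/1=ro+wh:,xino=/dev/shm/aufs.xino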
+func (a *Driver) tryMount(ro []string, rw, target, mountLabel string) (err error) { + var ( + rwBranch = fmt.Sprintf("%s=rw", rw) + roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) + data = label.FormatMountLabel(fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches), mountLabel) + ) + return mount("none", target, "aufs", 0, data) +} + +func (a *Driver) mountRw(rw, target, mountLabel string) error { + data := label.FormatMountLabel(fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw), mountLabel) + return mount("none", target, "aufs", 0, data) +} + +func rollbackMount(target string, err error) { + if err != nil { + Unmount(target) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go new file mode 100644 index 0000000000..cc5b3a2030 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go @@ -0,0 +1,697 @@ +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "io/ioutil" + "os" + "path" + "testing" +) + +var ( + tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") +) + +func testInit(dir string, t *testing.T) graphdriver.Driver { + d, err := Init(dir, nil) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t *testing.T) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer os.RemoveAll(tmp) + if d == nil { + t.Fatalf("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func TestNewDriverFromExistingDir(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + testInit(tmp, t) + testInit(tmp, t) + os.RemoveAll(tmp) +} + +func TestCreateNewDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } +} + +func TestCreateNewDirStructure(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { + t.Fatal(err) + } + } +} + +func TestRemoveImage(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { + t.Fatalf("Error should not be nil because dirs with id 1 should be deleted: %s", p) + } + } +} + +func
TestGetWithoutParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + expected := path.Join(tmp, "diff", "1") + if diffPath != expected { + t.Fatalf("Expected path %s got %s", expected, diffPath) + } +} + +func TestCleanupWithNoDirs(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestCleanupWithDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestMountedFalseResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + response, err := d.mounted("1") + if err != nil { + t.Fatal(err) + } + + if response != false { + t.Fatalf("The mounted response for dir id 1 should be false") + } +} + +func TestMountedTrueResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + _, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + response, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if response != true { + t.Fatalf("The mounted response for dir id 2 should be true") + } +} + +func TestMountWithParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + expected := path.Join(tmp, "mnt", "2") + if mntPath != expected { + t.Fatalf("Expected %s got %s", expected, mntPath) + } +} + +func TestRemoveMountedDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatalf("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker"); err == nil { + t.Fatalf("Error should not be nil when the parent does not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatalf("Archive should not be
nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + 
} + + diffSize, err = d.DiffSize("2", "") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id name should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[1] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func TestMountMoreThan42Layers(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.Create(current, parent); err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + f.Close() + + if i%10 == 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Fatal(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Fatal(err) + } + if len(files) != expected { + t.Fatalf("Expected %d got %d", expected, len(files)) + } +} diff --git 
a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go new file mode 100644 index 0000000000..fb9b81edd2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go @@ -0,0 +1,46 @@ +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the layer ids, which are stored as regular files +// (not directories) under the given root +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIds(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/migrate.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/migrate.go new file mode 100644 index 0000000000..dda7cb7390 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/migrate.go @@ -0,0 +1,194 @@ +package aufs + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" +) + +type metadata struct { + ID string `json:"id"` + ParentID string `json:"parent,omitempty"` + Image string `json:"Image,omitempty"` + + parent *metadata +} + +func pathExists(pth string) bool { + if _, err := os.Stat(pth); err != nil { + return false + } + return true +} + +// Migrate existing images and containers from docker < 0.7.x +// +// The format pre 0.7 is for docker to store the metadata and filesystem +// content in the same directory. For the migration to work we need to move image layer +// data from /var/lib/docker/graph/<id>/layers to the diff of the registered id. +// +// Next we need to migrate the container's rw layer to the diff of the driver. After the +// contents are migrated we need to register the image and container ids with the +// driver. +// +// For the migration we try to move the folder containing the layer files; if that +// fails because the data is currently mounted, we fall back to creating a +// symlink.
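+// For example (an illustrative layout): a pre-0.7 image layer at +// /var/lib/docker/graph/<id>/layer is relocated to the driver's diff/<id> +// directory, and a container's containers/<id>/rw directory likewise becomes +// diff/<id>, after which the ids are registered with Create below.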
+func (a *Driver) Migrate(pth string, setupInit func(p string) error) error { + if pathExists(path.Join(pth, "graph")) { + if err := a.migrateRepositories(pth); err != nil { + return err + } + if err := a.migrateImages(path.Join(pth, "graph")); err != nil { + return err + } + return a.migrateContainers(path.Join(pth, "containers"), setupInit) + } + return nil +} + +func (a *Driver) migrateRepositories(pth string) error { + name := path.Join(pth, "repositories") + if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error { + fis, err := ioutil.ReadDir(pth) + if err != nil { + return err + } + + for _, fi := range fis { + if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "rw")) { + if err := tryRelocate(path.Join(pth, id, "rw"), path.Join(a.rootPath(), "diff", id)); err != nil { + return err + } + + if !a.Exists(id) { + + metadata, err := loadMetadata(path.Join(pth, id, "config.json")) + if err != nil { + return err + } + + initID := fmt.Sprintf("%s-init", id) + if err := a.Create(initID, metadata.Image); err != nil { + return err + } + + initPath, err := a.Get(initID, "") + if err != nil { + return err + } + // setup init layer + if err := setupInit(initPath); err != nil { + return err + } + + if err := a.Create(id, initID); err != nil { + return err + } + } + } + } + return nil +} + +func (a *Driver) migrateImages(pth string) error { + fis, err := ioutil.ReadDir(pth) + if err != nil { + return err + } + var ( + m = make(map[string]*metadata) + current *metadata + exists bool + ) + + for _, fi := range fis { + if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "layer")) { + if current, exists = m[id]; !exists { + current, err = loadMetadata(path.Join(pth, id, "json")) + if err != nil { + return err + } + m[id] = current + } + } + } + + for _, v := range m { + v.parent = m[v.ParentID] + } + + migrated := make(map[string]bool) + for _, v := range m { + if err := a.migrateImage(v, pth, migrated); err != nil { + return err + } + } + return nil +} + +func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) error { + if !migrated[m.ID] { + if m.parent != nil { + a.migrateImage(m.parent, pth, migrated) + } + if err := tryRelocate(path.Join(pth, m.ID, "layer"), path.Join(a.rootPath(), "diff", m.ID)); err != nil { + return err + } + if !a.Exists(m.ID) { + if err := a.Create(m.ID, m.ParentID); err != nil { + return err + } + } + migrated[m.ID] = true + } + return nil +} + +// tryRelocate will try to rename the old path to the new path and, if +// the operation fails, it will fall back to a symlink +func tryRelocate(oldPath, newPath string) error { + s, err := os.Lstat(newPath) + if err != nil && !os.IsNotExist(err) { + return err + } + // If the destination is a symlink then we already tried to relocate once before + // and it failed, so we delete it and retry the rename below + if s != nil && s.Mode()&os.ModeSymlink == os.ModeSymlink { + if err := os.RemoveAll(newPath); err != nil { + return err + } + } + if err := os.Rename(oldPath, newPath); err != nil { + if sErr := os.Symlink(oldPath, newPath); sErr != nil { + return fmt.Errorf("Unable to relocate %s to %s: Rename err %s Symlink err %s", oldPath, newPath, err, sErr) + } + } + return nil +} + +func loadMetadata(pth string) (*metadata, error) { + f, err := os.Open(pth) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + out = &metadata{} + dec =
json.NewDecoder(f) + ) + + if err := dec.Decode(out); err != nil { + return nil, err + } + return out, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount.go new file mode 100644 index 0000000000..fa74e05b07 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount.go @@ -0,0 +1,18 @@ +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/docker/docker/pkg/log" +) + +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + log.Errorf("[warning]: couldn't run auplink before unmount: %s", err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go new file mode 100644 index 0000000000..c86f1bbd63 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go @@ -0,0 +1,9 @@ +package aufs + +import "syscall" + +const MsRemount = syscall.MS_REMOUNT + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go new file mode 100644 index 0000000000..e291bef3aa --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package aufs + +import "errors" + +const MsRemount = 0 + +func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + return errors.New("mount is not implemented on this platform") +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/MAINTAINERS new file mode 100644 index 0000000000..9e629d5fcc --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/MAINTAINERS @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go new file mode 100644 index 0000000000..26102aa1ef --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go @@ -0,0 +1,225 @@ +// +build linux + +package btrfs + +/* +#include <stdlib.h> +#include <dirent.h> +#include <btrfs/ioctl.h> +*/ +import "C" + +import ( + "fmt" + "os" + "path" + "syscall" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/mount" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + rootdir := path.Dir(home) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, err + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs { + return nil, graphdriver.ErrPrerequisites + } + + if err := os.MkdirAll(home, 0700); err != nil { + return nil, err + } + + if err :=
graphdriver.MakePrivate(home); err != nil { + return nil, err + } + + driver := &Driver{ + home: home, + } + + return graphdriver.NaiveDiffDriver(driver), nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "btrfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func subvolDelete(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirId(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) Create(id string, parent string) error { + subvolumes := path.Join(d.home, "subvolumes") + if err := os.MkdirAll(subvolumes, 0700); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir, err := d.Get(parent, "") + if err != nil { + return err + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + return nil +} + +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirId(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirId(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + 
return dir, nil +} + +func (d *Driver) Put(id string) { + // Get() creates no runtime resources (e.g. mounts), + // so this doesn't need to do anything. +} + +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirId(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 0000000000..cde23ce4a0 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,28 @@ +package btrfs + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run. +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown. +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 0000000000..f07088887a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/MAINTAINERS new file mode 100644 index 0000000000..9e629d5fcc --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/MAINTAINERS @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/README.md b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/README.md new file mode 100644 index 0000000000..c42620247b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,156 @@ +## devicemapper - a storage backend based on Device Mapper + +### Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. For each devicemapper +graph location (typically `/var/lib/docker/devicemapper`, $graph below) +a thin pool is created based on two block devices, one for data and +one for metadata. By default these block devices are created +automatically by using loopback mounts of automatically created sparse +files. + +The default loopback files used are `$graph/devicemapper/data` and +`$graph/devicemapper/metadata`. Additional metadata required to map +from docker entities to the corresponding devicemapper volumes is +stored in the `$graph/devicemapper/json` file (encoded as JSON).
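+ +For illustration (assuming the default graph location), listing the +loopback directory would typically show the two sparse files alongside the +JSON metadata file: + + $ ls /var/lib/docker/devicemapper/devicemapper + data json metadata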
+ +In order to support multiple devicemapper graphs on a system, the thin +pool will be named something like: `docker-0:33-19478248-pool`, where +the `0:33` part is the major:minor device number and `19478248` is the +inode number of the $graph directory. + +On the thin pool, docker automatically creates a base thin device, +called something like `docker-0:33-19478248-base` of a fixed +size. This is automatically formatted with an empty filesystem on +creation. This device is the base of all docker images and +containers. All base images are snapshots of this device and those +images are then in turn used as snapshots for other images and +eventually containers. + +### options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the `--storage-opt` flags. +This uses the `dm` prefix and would be used something like `docker -d --storage-opt dm.foo=bar`. + +Here is the list of supported options: + + * `dm.basesize` + + Specifies the size to use when creating the base device, which + limits the size of images and containers. The default value is + 10G. Note, thin devices are inherently "sparse", so a 10G device + which is mostly empty doesn't use 10 GB of space on the + pool. However, the larger the device is, the more space the + filesystem will use even when empty. **Warning**: This value affects the + system-wide "base" empty filesystem that may already be + initialized and inherited by pulled images. Typically, a change + to this value will require additional steps to take effect: 1) + stop `docker -d`, 2) `rm -rf /var/lib/docker`, 3) start `docker -d`. + + Example use: + + ``docker -d --storage-opt dm.basesize=20G`` + + * `dm.loopdatasize` + + Specifies the size to use when creating the loopback file for the + "data" device which is used for the thin pool. The default size is + 100G. Note that the file is sparse, so it will not initially take + up this much space. + + Example use: + + ``docker -d --storage-opt dm.loopdatasize=200G`` + + * `dm.loopmetadatasize` + + Specifies the size to use when creating the loopback file for the + "metadata" device which is used for the thin pool. The default size is + 2G. Note that the file is sparse, so it will not initially take + up this much space. + + Example use: + + ``docker -d --storage-opt dm.loopmetadatasize=4G`` + + * `dm.fs` + + Specifies the filesystem type to use for the base device. The supported + options are "ext4" and "xfs". The default is "ext4". + + Example use: + + ``docker -d --storage-opt dm.fs=xfs`` + + * `dm.mkfsarg` + + Specifies extra mkfs arguments to be used when creating the base device. + + Example use: + + ``docker -d --storage-opt "dm.mkfsarg=-O ^has_journal"`` + + * `dm.mountopt` + + Specifies extra mount options used when mounting the thin devices. + + Example use: + + ``docker -d --storage-opt dm.mountopt=nodiscard`` + + * `dm.datadev` + + Specifies a custom blockdevice to use for data for the thin pool. + + If using a block device for device mapper storage, ideally both + datadev and metadatadev should be specified to completely avoid + using the loopback device. + + Example use: + + ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` + + * `dm.metadatadev` + + Specifies a custom blockdevice to use for metadata for the thin + pool. + + For best performance the metadata should be on a different spindle + than the data, or even better on an SSD. + + If setting up a new metadata pool it is required to be valid.
This + can be achieved by zeroing the first 4k to indicate empty + metadata, like this: + + ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1`` + + Example use: + + ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` + + * `dm.blocksize` + + Specifies a custom blocksize to use for the thin pool. The default + blocksize is 64K. + + Example use: + + ``docker -d --storage-opt dm.blocksize=512K`` + + * `dm.blkdiscard` + + Enables or disables the use of blkdiscard when removing + devicemapper devices. This is enabled by default (only) if using + loopback devices and is required to re-sparsify the loopback file + on image/container removal. + + Disabling this on loopback can lead to *much* faster container + removal times, but will mean that space used in the /var/lib/docker + directory is not returned to the system for other use when + containers are removed. + + Example use: + + ``docker -d --storage-opt dm.blkdiscard=false`` diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/attach_loopback.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/attach_loopback.go new file mode 100644 index 0000000000..9cfa18a4d3 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/attach_loopback.go @@ -0,0 +1,129 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "os" + "syscall" + + "github.com/docker/docker/pkg/log" +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + log.Errorf("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + log.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + log.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + log.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// attachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File.
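+// Illustrative usage (hypothetical path): the devmapper setup would call +// attachLoopDevice("/var/lib/docker/devicemapper/devicemapper/data") and get +// back an open *os.File for a device such as /dev/loop0, with the auto-clear +// flag (LoFlagsAutoClear) already set on it.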
+func attachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard the error and start looking for a + // loopback from index 0. + startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + log.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + log.Errorf("Error opening sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &LoopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + log.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + log.Errorf("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go new file mode 100644 index 0000000000..ccaea0181e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go @@ -0,0 +1,1253 @@ +// +build linux + +package devmapper + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/units" + "github.com/docker/libcontainer/label" +) + +var ( + DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors +) + +type DevInfo struct { + Hash string `json:"-"` + DeviceId int `json:"device_id"` + Size uint64 `json:"size"` + TransactionId uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + devices *DeviceSet `json:"-"` + + mountCount int `json:"-"` + mountPath string `json:"-"` + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks, all + // device locks must be acquired *before* the global DeviceSet lock, and + // multiple device locks should be acquired parent before child.
+ lock sync.Mutex `json:"-"` +} + +type MetaData struct { + Devices map[string]*DevInfo `json:"Devices"` + devicesLock sync.Mutex `json:"-"` // Protects all read/writes to Devices map +} + +type DeviceSet struct { + MetaData + sync.Mutex // Protects Devices map and serializes calls into libdevmapper + root string + devicePrefix string + TransactionId uint64 + NewTransactionId uint64 + nextDeviceId int + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string + metadataDevice string + doBlkDiscard bool + thinpBlockSize uint32 +} + +type DiskUsage struct { + Used uint64 + Total uint64 +} + +type Status struct { + PoolName string + DataLoopback string + MetadataLoopback string + Data DiskUsage + Metadata DiskUsage + SectorSize uint64 +} + +type DevStatus struct { + DeviceId int + Size uint64 + TransactionId uint64 + SizeInSectors uint64 + MappedSectors uint64 + HighestMappedSector uint64 +} + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *DevInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = "base" + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *DevInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *DevInfo) string { + file := info.Hash + if file == "" { + file = "base" + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) oldMetadataFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + return devices.devicePrefix + "-pool" +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := os.Stat(filename) + return err == nil +} + +// ensureImage creates a sparse file of <size> bytes at the path +// <root>/devicemapper/<name>. +// If the file already exists, it does nothing. +// Either way it returns the full path.
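+// For example (illustrative, assuming the default root): ensureImage("data", size) +// creates or reuses /var/lib/docker/devicemapper/devicemapper/data and returns +// that path.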
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) { + return "", err + } + + if _, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + log.Debugf("Creating loopback file %s for devicemapper use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err = file.Truncate(size); err != nil { + return "", err + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionId() uint64 { + devices.NewTransactionId = devices.NewTransactionId + 1 + return devices.NewTransactionId +} + +func (devices *DeviceSet) removeMetadata(info *DevInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +func (devices *DeviceSet) saveMetadata(info *DevInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), devices.metadataFile(info)); err != nil { + return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + if devices.NewTransactionId != devices.TransactionId { + if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { + return fmt.Errorf("Error setting devmapper transaction ID: %s", err) + } + devices.TransactionId = devices.NewTransactionId + } + return nil +} + +func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { + devices.devicesLock.Lock() + defer devices.devicesLock.Unlock() + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { + log.Debugf("registerDevice(%v, %v)", id, hash) + info := &DevInfo{ + Hash: hash, + DeviceId: id, + Size: size, + TransactionId: devices.allocateTransactionId(), + Initialized: false, + devices: devices, + } + + devices.devicesLock.Lock() + devices.Devices[hash] = info + devices.devicesLock.Unlock() + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + devices.devicesLock.Lock() + delete(devices.Devices, hash) + devices.devicesLock.Unlock() + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { + log.Debugf("activateDeviceIfNeeded(%v)", info.Hash) + + if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return
activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) +} + +func (devices *DeviceSet) createFilesystem(info *DevInfo) error { + devname := info.DevName() + + args := []string{} + for _, arg := range devices.mkfsArgs { + args = append(args, arg) + } + + args = append(args, devname) + + var err error + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("Unsupported filesystem type %s", devices.filesystem) + } + if err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) initMetaData() error { + _, _, _, params, err := getStatus(devices.getPoolName()) + if err != nil { + return err + } + + if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil { + return err + } + devices.NewTransactionId = devices.TransactionId + + // Migrate old metadata file + + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := MetaData{Devices: make(map[string]*DevInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + + // If the transaction id is larger than the actual one we lost the device due to some crash + if info.TransactionId <= devices.TransactionId { + devices.saveMetadata(info) + } + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { + info := &DevInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + return nil + } + + // If the transaction id is larger than the actual one we lost the device due to some crash + if info.TransactionId > devices.TransactionId { + return nil + } + + return info +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDevice("") + if oldInfo != nil && oldInfo.Initialized { + return nil + } + + if oldInfo != nil && !oldInfo.Initialized { + log.Debugf("Removing uninitialized base image") + if err := devices.deleteDevice(oldInfo); err != nil { + return err + } + } + + log.Debugf("Initializing base device-mapper snapshot") + + id := devices.nextDeviceId + + // Create initial device + if err := createDevice(devices.getPoolDevName(), &id); err != nil { + return err + } + + // Ids are 24bit, so wrap around + devices.nextDeviceId = (id + 1) & 0xffffff + + log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) + info, err := devices.registerDevice(id, "", devices.baseFsSize) + if err != nil { + _ = deleteDevice(devices.getPoolDevName(), id) + return err + } + + log.Debugf("Creating filesystem on base device-mapper snapshot") + + if err = devices.activateDeviceIfNeeded(info); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err =
devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +func (devices *DeviceSet) log(level int, file string, line int, dmError int, message string) { + if level >= 7 { + return // Ignore _LOG_DEBUG + } + + log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("Can't shrink file") + } + + dataloopback := FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := LoopbackSetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := suspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("Unable to reload pool: %s", err) + } + + // Resume the pool + if err := resumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) error { + logInit(devices) + + _, err := getDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return graphdriver.ErrNotSupported + } + + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + return err + } + + // Set the device prefix from the device id and inode of the docker root dir + + st, err := os.Stat(devices.root) + if err != nil { + return fmt.Errorf("Error looking up dir %s: %s", devices.root, err) + } + sysSt := st.Sys().(*syscall.Stat_t) + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. 
+ // docker-maj,min[-inode] stands for: + // - Managed by docker + // - The target of this device is at major <maj> and minor <min> + // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself. + devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) + log.Debugf("Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the device <prefix>-pool + log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) + info, err := getInfo(devices.getPoolName()) + if info == nil { + log.Debugf("Error device getInfo: %s", err) + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, and lxc-start will die if it inherits any unexpected files, + // so we add this bad hack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in <root>/devicemapper/data and + // <root>/devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if info.Exists == 0 { + log.Debugf("Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in <root>/devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("Loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + log.Debugf("Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = attachLoopDevice(data) + if err != nil { + return err + } + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in <root>/devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("Loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + log.Debugf("Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = attachLoopDevice(metadata) + if err != nil { + return err + } + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err = devices.initMetaData(); err != nil { + return err + } + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + log.Debugf("Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +func (devices *DeviceSet) AddDevice(hash, baseHash string) error { + baseInfo, err := devices.lookupDevice(baseHash) + if err != nil { + return err + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("device %s already exists", hash) + } + + deviceId := devices.nextDeviceId + + if err :=
createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + log.Debugf("Error creating snap device: %s", err) + return err + } + + // Ids are 24bit, so wrap around + devices.nextDeviceId = (deviceId + 1) & 0xffffff + + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { + deleteDevice(devices.getPoolDevName(), deviceId) + log.Debugf("Error registering device: %s", err) + return err + } + return nil +} + +func (devices *DeviceSet) deleteDevice(info *DevInfo) error { + if devices.doBlkDiscard { + // This is a workaround for the kernel not discarding blocks + // on the thin pool when we remove a thinp device, so we do it + // manually + if err := devices.activateDeviceIfNeeded(info); err == nil { + if err := BlockDeviceDiscard(info.DevName()); err != nil { + log.Debugf("Error discarding block on device: %s (ignoring)", err) + } + } + } + + devinfo, _ := getInfo(info.Name()) + if devinfo != nil && devinfo.Exists != 0 { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { + log.Debugf("Error removing device: %s", err) + return err + } + } + + if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { + log.Debugf("Error deleting device: %s", err) + return err + } + + devices.allocateTransactionId() + devices.devicesLock.Lock() + delete(devices.Devices, info.Hash) + devices.devicesLock.Unlock() + + if err := devices.removeMetadata(info); err != nil { + devices.devicesLock.Lock() + devices.Devices[info.Hash] = info + devices.devicesLock.Unlock() + log.Debugf("Error removing meta data: %s", err) + return err + } + + return nil +} + +func (devices *DeviceSet) DeleteDevice(hash string) error { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + return devices.deleteDevice(info) +} + +func (devices *DeviceSet) deactivatePool() error { + log.Debugf("[devmapper] deactivatePool()") + defer log.Debugf("[devmapper] deactivatePool END") + devname := devices.getPoolDevName() + devinfo, err := getInfo(devname) + if err != nil { + return err + } + if devinfo.Exists != 0 { + return removeDevice(devname) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { + log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash) + defer log.Debugf("[devmapper] deactivateDevice END") + + // Wait for the unmount to be effective, + // by watching the value of Info.OpenCount for the device + if err := devices.waitClose(info); err != nil { + log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err) + } + + devinfo, err := getInfo(info.Name()) + if err != nil { + return err + } + if devinfo.Exists != 0 { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { + return err + } + } + + return nil +} + +// Issues the underlying dm remove operation and then waits +// for it to finish. +func (devices *DeviceSet) removeDeviceAndWait(devname string) error { + var err error + + for i := 0; i < 1000; i++ { + err = removeDevice(devname) + if err == nil { + break + } + if err != ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit and retry a few times.
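+ // The DeviceSet lock is dropped across the sleep below so other + // devmapper operations can make progress while we poll.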
+ devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if err != nil { + return err + } + + if err := devices.waitRemove(devname); err != nil { + return err + } + return nil +} + +// waitRemove blocks until either: +// a) the device registered at <device_set_prefix>-<hash> is removed, +// or b) the 10 second timeout expires. +func (devices *DeviceSet) waitRemove(devname string) error { + log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) + defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) + i := 0 + for ; i < 1000; i++ { + devinfo, err := getInfo(devname) + if err != nil { + // If there is an error we assume the device doesn't exist. + // The error might actually be something else, but we can't differentiate. + return nil + } + if i%100 == 0 { + log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) + } + if devinfo.Exists == 0 { + break + } + + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if i == 1000 { + return fmt.Errorf("Timeout while waiting for device %s to be removed", devname) + } + return nil +} + +// waitClose blocks until either: +// a) the device registered at <device_set_prefix>-<hash> is closed, +// or b) the 10 second timeout expires. +func (devices *DeviceSet) waitClose(info *DevInfo) error { + i := 0 + for ; i < 1000; i++ { + devinfo, err := getInfo(info.Name()) + if err != nil { + return err + } + if i%100 == 0 { + log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount) + } + if devinfo.OpenCount == 0 { + break + } + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if i == 1000 { + return fmt.Errorf("Timeout while waiting for device %s to close", info.Hash) + } + return nil +} + +func (devices *DeviceSet) Shutdown() error { + + log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) + log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) + defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) + + var devs []*DevInfo + + devices.devicesLock.Lock() + for _, info := range devices.Devices { + devs = append(devs, info) + } + devices.devicesLock.Unlock() + + for _, info := range devs { + info.lock.Lock() + if info.mountCount > 0 { + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies.
+ if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { + log.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err) + } + + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + log.Debugf("Shutdown deactivate %s, error: %s", info.Hash, err) + } + devices.Unlock() + } + info.lock.Unlock() + } + + info, _ := devices.lookupDevice("") + if info != nil { + info.lock.Lock() + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + log.Debugf("Shutdown deactivate base, error: %s", err) + } + devices.Unlock() + info.lock.Unlock() + } + + devices.Lock() + if err := devices.deactivatePool(); err != nil { + log.Debugf("Shutdown deactivate pool, error: %s", err) + } + devices.Unlock() + + return nil +} + +func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info.mountCount > 0 { + if path != info.mountPath { + return fmt.Errorf("Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path) + } + + info.mountCount++ + return nil + } + + if err := devices.activateDeviceIfNeeded(info); err != nil { + return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) + } + + var flags uintptr = syscall.MS_MGC_VAL + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs UUID + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + err = syscall.Mount(info.DevName(), path, fstype, flags, joinMountOptions("discard", options)) + if err != nil && err == syscall.EINVAL { + err = syscall.Mount(info.DevName(), path, fstype, flags, options) + } + if err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + info.mountCount = 1 + info.mountPath = path + + return nil +} + +func (devices *DeviceSet) UnmountDevice(hash string) error { + log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash) + defer log.Debugf("[devmapper] UnmountDevice END") + + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if info.mountCount == 0 { + return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash) + } + + info.mountCount-- + if info.mountCount > 0 { + return nil + } + + log.Debugf("[devmapper] Unmount(%s)", info.mountPath) + if err := syscall.Unmount(info.mountPath, 0); err != nil { + return err + } + log.Debugf("[devmapper] Unmount done") + + if err := devices.deactivateDevice(info); err != nil { + return err + } + + info.mountPath = "" + + return nil +} + +func (devices *DeviceSet) HasDevice(hash string) bool { + devices.Lock() + defer devices.Unlock() + + info, _ := devices.lookupDevice(hash) + return info != nil +} + +func (devices *DeviceSet) HasActivatedDevice(hash string) bool { + info, _ := devices.lookupDevice(hash) + if info == nil { + return false + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + devinfo, _ := getInfo(info.Name()) + return devinfo != nil && devinfo.Exists != 0 +} + +func (devices *DeviceSet) List() []string { +
devices.Lock() + defer devices.Unlock() + + devices.devicesLock.Lock() + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + devices.devicesLock.Unlock() + + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = getStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDevice(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceId: info.DeviceId, + Size: info.Size, + TransactionId: info.TransactionId, + } + + if err := devices.activateDeviceIfNeeded(info); err != nil { + return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) + } + + if sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()); err != nil { + return nil, err + } else { + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + } + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = getStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + if len(devices.dataDevice) > 0 { + status.DataLoopback = devices.dataDevice + } else { + status.DataLoopback = path.Join(devices.loopbackDir(), "data") + } + if len(devices.metadataDevice) > 0 { + status.MetadataLoopback = devices.metadataDevice + } else { + status.MetadataLoopback = path.Join(devices.loopbackDir(), "metadata") + } + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + + status.SectorSize = blockSizeInSectors * 512 + } + + return status +} + +func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) { + SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + MetaData: MetaData{Devices: make(map[string]*DevInfo)}, + dataLoopbackSize: DefaultDataLoopbackSize, + metaDataLoopbackSize: DefaultMetaDataLoopbackSize, + baseFsSize: DefaultBaseFsSize, + filesystem: "ext4", + doBlkDiscard: true, + thinpBlockSize: DefaultThinpBlockSize, + } + + foundBlkDiscard := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, 
err + } + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + default: + return nil, fmt.Errorf("Unknown option %s\n", key) + } + } + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && devices.dataDevice != "" { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper.go new file mode 100644 index 0000000000..d09e740749 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper.go @@ -0,0 +1,646 @@ +// +build linux + +package devmapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/log" +) + +type DevmapperLogger interface { + log(level int, file string, line int, dmError int, message string) +} + +const ( + DeviceCreate TaskType = iota + DeviceReload + DeviceRemove + DeviceRemoveAll + DeviceSuspend + DeviceResume + DeviceInfo + DeviceDeps + DeviceRename + DeviceVersion + DeviceStatus + DeviceTable + DeviceWaitevent + DeviceList + DeviceClear + DeviceMknodes + DeviceListVersions + DeviceTargetMsg + DeviceSetGeometry +) + +const ( + AddNodeOnResume AddNodeType = iota + AddNodeOnCreate +) + +var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrAttachLoopbackDevice = errors.New("loopback mounting failed") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") + ErrRunRemoveDevice = errors.New("running removeDevice 
failed") + ErrInvalidAddNode = errors.New("Invalide AddNoce type") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") + ErrBusy = errors.New("Device is Busy") + + dmSawBusy bool + dmSawExist bool +) + +type ( + Task struct { + unmanaged *CDmTask + } + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + } + TaskType int + AddNodeType int +) + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) Run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) SetName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) SetMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) SetSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) SetCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) SetAddNode(addNode AddNodeType) error { + if addNode != AddNodeOnResume && addNode != AddNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) SetRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) AddTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) GetInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) GetDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + log.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +func LoopbackSetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + log.Errorf("Error loopbackSetCapacity: %s", err) + return ErrLoopbackSetCapacity + } + return nil +} + +func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + 
targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} + +func UdevWait(cookie uint) error { + if res := DmUdevWait(cookie); res != 1 { + log.Debugf("Failed to wait on udev cookie %d", cookie) + return ErrUdevWait + } + return nil +} + +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger = nil + +func logInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + log.Debugf("Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// Useful helper for cleanup +func RemoveDevice(name string) error { + task := TaskCreate(DeviceRemove) + if task == nil { + return ErrCreateRemoveTask + } + if err := task.SetName(name); err != nil { + log.Debugf("Can't set task name %s", name) + return err + } + if err := task.Run(); err != nil { + return ErrRunRemoveDevice + } + return nil +} + +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + log.Errorf("Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. 
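+ // Sync here is the sync(2) syscall, flushing dirty buffers system-wide + // before the follow-up device-mapper remove.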
+ syscall.Sync() + + return nil +} + +// This is the programmatic example of "dmsetup create" +func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := createTask(DeviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (createPool) %s", err) + } + + UdevWait(cookie) + + return nil +} + +func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := createTask(DeviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceReload %s", err) + } + + return nil +} + +func createTask(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("Can't create task of type %d", int(t)) + } + if err := task.SetName(name); err != nil { + return nil, fmt.Errorf("Can't set task name %s", name) + } + return task, nil +} + +func getInfo(name string) (*Info, error) { + task, err := createTask(DeviceInfo, name) + if task == nil { + return nil, err + } + if err := task.Run(); err != nil { + return nil, err + } + return task.GetInfo() +} + +func getDriverVersion() (string, error) { + task := TaskCreate(DeviceVersion) + if task == nil { + return "", fmt.Errorf("Can't create DeviceVersion task") + } + if err := task.Run(); err != nil { + return "", err + } + return task.GetDriverVersion() +} + +func getStatus(name string) (uint64, uint64, string, string, error) { + task, err := createTask(DeviceStatus, name) + if task == nil { + log.Debugf("getStatus: Error createTask: %s", err) + return 0, 0, "", "", err + } + if err := task.Run(); err != nil { + log.Debugf("getStatus: Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.GetInfo() + if err != nil { + log.Debugf("getStatus: Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + log.Debugf("getStatus: Non-existing device %s", name) + return 0, 0, "", "", fmt.Errorf("Non-existing device %s", name) + } + + _, start, length, targetType, params := task.GetNextTarget(0) + return start, length, targetType, params, nil +} + +func setTransactionId(poolName string, oldId uint64, newId uint64) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err
!= nil { + return fmt.Errorf("Error running setTransactionId %s", err) + } + return nil +} + +func suspendDevice(name string) error { + task, err := createTask(DeviceSuspend, name) + if task == nil { + return err + } + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceSuspend %s", err) + } + return nil +} + +func resumeDevice(name string) error { + task, err := createTask(DeviceResume, name) + if task == nil { + return err + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceResume %s", err) + } + + UdevWait(cookie) + + return nil +} + +func createDevice(poolName string, deviceId *int) error { + log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) + + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + return fmt.Errorf("Error running createDevice %s", err) + } + break + } + return nil +} + +func deleteDevice(poolName string, deviceId int) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running deleteDevice %s", err) + } + return nil +} + +func removeDevice(name string) error { + log.Debugf("[devmapper] removeDevice START") + defer log.Debugf("[devmapper] removeDevice END") + task, err := createTask(DeviceRemove, name) + if task == nil { + return err + } + dmSawBusy = false + if err = task.Run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("Error running removeDevice %s", err) + } + return nil +} + +func activateDevice(poolName string, name string, deviceId int, size uint64) error { + task, err := createTask(DeviceCreate, name) + if task == nil { + return err + } + + params := fmt.Sprintf("%s %d", poolName, deviceId) + if err := task.AddTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("Can't add target %s", err) + } + if err := task.SetAddNode(AddNodeOnCreate); err != nil { + return fmt.Errorf("Can't add node %s", err) + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie %s", err) + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (activateDevice) %s", err) + } + + UdevWait(cookie) + + return nil +} + +func createSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { + devinfo, _ := getInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := suspendDevice(baseName); err != nil { + return err + } + } + + for { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + resumeDevice(baseName) + } + return err + } + + if err := task.SetSector(0); err != nil { + if 
doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set sector %s", err) + } + + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set message %s", err) + } + + dmSawExist = false + if err := task.Run(); err != nil { + if dmSawExist { + // Already exists, try next id + *deviceId++ + continue + } + + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) + } + + break + } + + if doSuspend { + if err := resumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go new file mode 100644 index 0000000000..c1c3e3891b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognised ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. 
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_log.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_log.go new file mode 100644 index 0000000000..ec7809cc51 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_log.go @@ -0,0 +1,30 @@ +// +build linux + +package devmapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + } + + if dmLogger != nil { + dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go new file mode 100644 index 0000000000..167261999e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go @@ -0,0 +1,37 @@ +// +build linux + +package devmapper + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +func init() { + // Reduce the size the the base fs and loopback for the tests + DefaultDataLoopbackSize = 300 * 1024 * 1024 + DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 + DefaultBaseFsSize = 300 * 1024 * 1024 +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown +func TestDevmapperSetup(t *testing.T) { + graphtest.GetDriver(t, "devicemapper") +} + +func TestDevmapperCreateEmpty(t *testing.T) { + 
graphtest.DriverTestCreateEmpty(t, "devicemapper") +} + +func TestDevmapperCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "devicemapper") +} + +func TestDevmapperCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "devicemapper") +} + +func TestDevmapperTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_wrapper.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_wrapper.go new file mode 100644 index 0000000000..bd1c6fd5b6 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_wrapper.go @@ -0,0 +1,240 @@ +// +build linux + +package devmapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include <libdevmapper.h> +#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it? +#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "unsafe" +) + +type ( + CDmTask C.struct_dm_task + + CLoopInfo64 C.struct_loop_info64 + LoopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncrypt_type uint32 + loEncrypt_key_size uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 + } +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD + + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) + +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + LogWithErrnoInit = logWithErrnoInitFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *CDmTask) { +
C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *CDmTask { + return (*CDmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *CDmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *CDmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *CDmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *CDmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *CDmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *CDmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetInfoFct(task *CDmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *CDmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams) + return uintptr(nextp) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return 
int(C.dm_get_library_version(buffer, 128)) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go new file mode 100644 index 0000000000..8f9de85d4e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go @@ -0,0 +1,151 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/units" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Placeholder interfaces, to be replaced +// at integration. + +// End of placeholder interfaces. + +type Driver struct { + *DeviceSet + home string +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options) + if err != nil { + return nil, err + } + + if err := graphdriver.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + } + + return graphdriver.NaiveDiffDriver(d), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(int64(s.SectorSize)))}, + {"Data file", s.DataLoopback}, + {"Metadata file", s.MetadataLoopback}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Total)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Total)))}, + } + if vStr, err := GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown() + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +func (d *Driver) Create(id, parent string) error { + if err := d.DeviceSet.AddDevice(id, parent); err != nil { + return err + } + + return nil +} + +func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + + // Create the target directories if they don't exist + if err := os.MkdirAll(mp, 0755); err != nil && !os.IsExist(err) { + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + return "", err + } + + rootFs := path.Join(mp, "rootfs") + if err := os.MkdirAll(rootFs, 0755); err != nil && !os.IsExist(err) { + d.DeviceSet.UnmountDevice(id) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); 
err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.DeviceSet.UnmountDevice(id) + return "", err + } + } + + return rootFs, nil +} + +func (d *Driver) Put(id string) { + if err := d.DeviceSet.UnmountDevice(id); err != nil { + log.Errorf("Warning: error unmounting device %s: %s", id, err) + } +} + +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/ioctl.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/ioctl.go new file mode 100644 index 0000000000..29caab0664 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/ioctl.go @@ -0,0 +1,72 @@ +// +build linux + +package devmapper + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { + loopInfo := &LoopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go new file mode 100644 index 0000000000..f64e995744 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go @@ -0,0 +1,86 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core.
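The ioctl shims in ioctl.go above are the raw building blocks for loopback setup. A hedged standalone sketch of the usual first step, asking the kernel's loop-control device for a free /dev/loopN index (Linux-only, needs access to /dev/loop-control; the constant mirrors the fallback define in devmapper_wrapper.go):

```go
// +build linux

package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
)

// Same fallback value as the #ifndef block in devmapper_wrapper.go.
const LoopCtlGetFree = 0x4C82

func main() {
	// /dev/loop-control is the kernel's loop management device (Linux 3.1+).
	ctl, err := os.OpenFile("/dev/loop-control", os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer ctl.Close()

	// Mirrors ioctlLoopCtlGetFree above: the ioctl returns the index of a
	// free loop device, creating one if necessary.
	index, _, errno := syscall.Syscall(syscall.SYS_IOCTL, ctl.Fd(), LoopCtlGetFree, 0)
	if errno != 0 {
		log.Fatal(errno)
	}
	fmt.Printf("free loop device: /dev/loop%d\n", index)
}
```

A follow-up LOOP_SET_FD on the returned node, as in ioctlLoopSetFd, is what actually binds the sparse image file to the device.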
+ +func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + file.Close() + if uint64(l) != maxLen { + return "", fmt.Errorf("unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/tests/_vendor/src/github.com/docker/docker/daemon/graphdriver/driver.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/daemon/graphdriver/driver.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/driver.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/fsdiff.go new file mode 100644 index 0000000000..5e9d32c1c8 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -0,0 +1,165 @@ +package graphdriver + +import ( + "fmt" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/utils" +) + +// naiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type naiveDiffDriver struct { + ProtoDriver +} + +// NaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) +// DiffSize(id, parent string) (bytes int64, err error) +func NaiveDiffDriver(driver ProtoDriver) Driver { + return &naiveDiffDriver{ProtoDriver: driver} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". 
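+//
+// A minimal wiring sketch from a client package (hedged; myProtoDriver is a
+// hypothetical ProtoDriver implementation, not defined in this patch):
+//
+//	var pd graphdriver.ProtoDriver = &myProtoDriver{}
+//	drv := graphdriver.NaiveDiffDriver(pd)
+//	arch, err := drv.Diff("layer-id", "parent-id") // tar stream of the layer's changes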
+func (gdw *naiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *naiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + start := time.Now().UTC() + log.Debugf("Start untar layer") + if err = archive.ApplyLayer(layerFs, diff); err != nil { + return + } + log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + if parent == "" { + return utils.TreeSize(layerFs) + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + err = fmt.Errorf("Driver %s failed to get image parent %s: %s", driver, parent, err) + return + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return + } + + return archive.ChangesSize(layerFs, changes), nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
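+//
+// For example, a caller might apply a layer and then measure it (a hedged
+// sketch; the ids and tarStream are hypothetical):
+//
+//	if _, err := drv.ApplyDiff("child-id", "parent-id", tarStream); err == nil {
+//		size, _ := drv.DiffSize("child-id", "parent-id") // change size in bytes
+//		_ = size
+//	}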
+func (gdw *naiveDiffDriver) DiffSize(id, parent string) (bytes int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest.go new file mode 100644 index 0000000000..6407e1205d --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest.go @@ -0,0 +1,229 @@ +package graphtest + +import ( + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" +) + +var ( + drv *Driver +) + +type Driver struct { + graphdriver.Driver + root string + refCount int +} + +func newDriver(t *testing.T, name string) *Driver { + root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } + + d, err := graphdriver.GetDriver(name, root, nil) + if err != nil { + if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites { + t.Skipf("Driver %s not supported", name) + } + t.Fatal(err) + } + return &Driver{d, root, 1} +} + +func cleanup(t *testing.T, d *Driver) { + if err := drv.Cleanup(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.root) +} + +func GetDriver(t *testing.T, name string) graphdriver.Driver { + if drv == nil { + drv = newDriver(t, name) + } else { + drv.refCount++ + } + return drv +} + +func PutDriver(t *testing.T) { + if drv == nil { + t.Skip("No driver to put!") + } + drv.refCount-- + if drv.refCount == 0 { + cleanup(t, drv) + drv = nil + } +} + +func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } + + if fi.Mode()&os.ModePerm != mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } + + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + if stat.Uid != uid { + t.Fatalf("%s not owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } + } + +} + +// Creates a new image and verifies that it is empty and has the right metadata +func DriverTestCreateEmpty(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + if err := driver.Create("empty", ""); err != nil { + t.Fatal(err) + } + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := 
ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") + + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + +} + +func createBase(t *testing.T, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + if err := driver.Create(name, ""); err != nil { + t.Fatal(err) + } + + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + t.Fatal(err) + } + + file := path.Join(dir, "a file") + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } +} + +func verifyBase(t *testing.T, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + +} + +func DriverTestCreateBase(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + verifyBase(t, driver, "Base") + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} + +func DriverTestCreateSnap(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + + if err := driver.Create("Snap", "Base"); err != nil { + t.Fatal(err) + } + + verifyBase(t, driver, "Snap") + + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/driver.go new file mode 100644 index 0000000000..a186060d03 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,118 @@ +package vfs + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/libcontainer/label" +) + +func init() { + graphdriver.Register("vfs", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + } + return graphdriver.NaiveDiffDriver(d), nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "vfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func isGNUcoreutils() bool { + if stdout, err := exec.Command("cp", "--version").Output(); err == nil { + return bytes.Contains(stdout, []byte("GNU coreutils")) + } + + return false +} + +func copyDir(src, dst string) error { + argv := make([]string, 0, 4) + + if isGNUcoreutils() { + argv = append(argv, "-aT", "--reflink=auto", src, dst) + } else { + argv = append(argv, "-a", src+"/.", dst+"/.") + } + + if output, err := exec.Command("cp", argv...).CombinedOutput(); err != nil { + return fmt.Errorf("Error VFS 
copying directory: %s (%s)", err, output) + } + return nil +} + +func (d *Driver) Create(id, parent string) error { + dir := d.dir(id) + if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { + return err + } + if err := os.Mkdir(dir, 0755); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == nil { + label.Relabel(dir, mountLabel, "") + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := copyDir(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, "dir", path.Base(id)) +} + +func (d *Driver) Remove(id string) error { + if _, err := os.Stat(d.dir(id)); err != nil { + return err + } + return os.RemoveAll(d.dir(id)) +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +func (d *Driver) Put(id string) { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here +} + +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 0000000000..eaf70f59d3 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,28 @@ +package vfs + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/tests/_vendor/src/github.com/docker/docker/dockerversion/dockerversion.go b/tests/Godeps/_workspace/src/github.com/docker/docker/dockerversion/dockerversion.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/dockerversion/dockerversion.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/dockerversion/dockerversion.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/engine/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/MAINTAINERS new file mode 100644 index 0000000000..aee10c8421 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/MAINTAINERS @@ -0,0 +1 @@ +Solomon Hykes (@shykes) diff --git a/tests/_vendor/src/github.com/docker/docker/engine/engine.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/engine.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/engine/engine.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/engine/engine.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go 
b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go new file mode 100644 index 0000000000..92f3757251 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go @@ -0,0 +1,162 @@ +package engine + +import ( + "bytes" + "strings" + "testing" +) + +func TestRegister(t *testing.T) { + if err := Register("dummy1", nil); err != nil { + t.Fatal(err) + } + + if err := Register("dummy1", nil); err == nil { + t.Fatalf("Expecting error, got none") + } + // Register is global so let's cleanup to avoid conflicts + defer unregister("dummy1") + + eng := New() + + //Should fail because global handlers are copied + //at the engine creation + if err := eng.Register("dummy1", nil); err == nil { + t.Fatalf("Expecting error, got none") + } + + if err := eng.Register("dummy2", nil); err != nil { + t.Fatal(err) + } + + if err := eng.Register("dummy2", nil); err == nil { + t.Fatalf("Expecting error, got none") + } + defer unregister("dummy2") +} + +func TestJob(t *testing.T) { + eng := New() + job1 := eng.Job("dummy1", "--level=awesome") + + if job1.handler != nil { + t.Fatalf("job1.handler should be empty") + } + + h := func(j *Job) Status { + j.Printf("%s\n", j.Name) + return 42 + } + + eng.Register("dummy2", h) + defer unregister("dummy2") + job2 := eng.Job("dummy2", "--level=awesome") + + if job2.handler == nil { + t.Fatalf("job2.handler shouldn't be nil") + } + + if job2.handler(job2) != 42 { + t.Fatalf("handler dummy2 was not found in job2") + } +} + +func TestEngineShutdown(t *testing.T) { + eng := New() + if eng.IsShutdown() { + t.Fatalf("Engine should not show as shutdown") + } + eng.Shutdown() + if !eng.IsShutdown() { + t.Fatalf("Engine should show as shutdown") + } +} + +func TestEngineCommands(t *testing.T) { + eng := New() + handler := func(job *Job) Status { return StatusOK } + eng.Register("foo", handler) + eng.Register("bar", handler) + eng.Register("echo", handler) + eng.Register("die", handler) + var output bytes.Buffer + commands := eng.Job("commands") + commands.Stdout.Add(&output) + commands.Run() + expected := "bar\ncommands\ndie\necho\nfoo\n" + if result := output.String(); result != expected { + t.Fatalf("Unexpected output:\nExpected = %v\nResult = %v\n", expected, result) + } +} + +func TestEngineString(t *testing.T) { + eng1 := New() + eng2 := New() + s1 := eng1.String() + s2 := eng2.String() + if eng1 == eng2 { + t.Fatalf("Different engines should have different names (%v == %v)", s1, s2) + } +} + +func TestEngineLogf(t *testing.T) { + eng := New() + input := "Test log line" + if n, err := eng.Logf("%s\n", input); err != nil { + t.Fatal(err) + } else if n < len(input) { + t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n) + } +} + +func TestParseJob(t *testing.T) { + eng := New() + // Verify that the resulting job calls to the right place + var called bool + eng.Register("echo", func(job *Job) Status { + called = true + return StatusOK + }) + input := "echo DEBUG=1 hello world VERBOSITY=42" + job, err := eng.ParseJob(input) + if err != nil { + t.Fatal(err) + } + if job.Name != "echo" { + t.Fatalf("Invalid job name: %v", job.Name) + } + if strings.Join(job.Args, ":::") != "hello:::world" { + t.Fatalf("Invalid job args: %v", job.Args) + } + if job.Env().Get("DEBUG") != "1" { + t.Fatalf("Invalid job env: %v", job.Env) + } + if job.Env().Get("VERBOSITY") != "42" { + t.Fatalf("Invalid job env: %v", job.Env) + } + if len(job.Env().Map()) != 2 { + t.Fatalf("Invalid job env: %v", 
job.Env) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("Job was not called") + } +} + +func TestCatchallEmptyName(t *testing.T) { + eng := New() + var called bool + eng.RegisterCatchall(func(job *Job) Status { + called = true + return StatusOK + }) + err := eng.Job("").Run() + if err == nil { + t.Fatalf("Engine.Job(\"\").Run() should return an error") + } + if called { + t.Fatalf("Catchall handler should not have been called for an empty job name") + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/engine/env.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/env.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/engine/env.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/engine/env.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go new file mode 100644 index 0000000000..b0caca9cbd --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go @@ -0,0 +1,324 @@ +package engine + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/docker/docker/pkg/testutils" +) + +func TestEnvLenZero(t *testing.T) { + env := &Env{} + if env.Len() != 0 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvLenNotZero(t *testing.T) { + env := &Env{} + env.Set("foo", "bar") + env.Set("ga", "bu") + if env.Len() != 2 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvLenDup(t *testing.T) { + env := &Env{ + "foo=bar", + "foo=baz", + "a=b", + } + // len(env) != env.Len() + if env.Len() != 2 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvGetDup(t *testing.T) { + env := &Env{ + "foo=bar", + "foo=baz", + "foo=bif", + } + expected := "bif" + if v := env.Get("foo"); v != expected { + t.Fatalf("expect %q, got %q", expected, v) + } +} + +func TestNewJob(t *testing.T) { + job := mkJob(t, "dummy", "--level=awesome") + if job.Name != "dummy" { + t.Fatalf("Wrong job name: %s", job.Name) + } + if len(job.Args) != 1 { + t.Fatalf("Wrong number of job arguments: %d", len(job.Args)) + } + if job.Args[0] != "--level=awesome" { + t.Fatalf("Wrong job arguments: %s", job.Args[0]) + } +} + +func TestSetenv(t *testing.T) { + job := mkJob(t, "dummy") + job.Setenv("foo", "bar") + if val := job.Getenv("foo"); val != "bar" { + t.Fatalf("Getenv returns incorrect value: %s", val) + } + + job.Setenv("bar", "") + if val := job.Getenv("bar"); val != "" { + t.Fatalf("Getenv returns incorrect value: %s", val) + } + if val := job.Getenv("nonexistent"); val != "" { + t.Fatalf("Getenv returns incorrect value: %s", val) + } +} + +func TestSetenvBool(t *testing.T) { + job := mkJob(t, "dummy") + job.SetenvBool("foo", true) + if val := job.GetenvBool("foo"); !val { + t.Fatalf("GetenvBool returns incorrect value: %t", val) + } + + job.SetenvBool("bar", false) + if val := job.GetenvBool("bar"); val { + t.Fatalf("GetenvBool returns incorrect value: %t", val) + } + + if val := job.GetenvBool("nonexistent"); val { + t.Fatalf("GetenvBool returns incorrect value: %t", val) + } +} + +func TestSetenvInt(t *testing.T) { + job := mkJob(t, "dummy") + + job.SetenvInt("foo", -42) + if val := job.GetenvInt("foo"); val != -42 { + t.Fatalf("GetenvInt returns incorrect value: %d", val) + } + + job.SetenvInt("bar", 42) + if val := job.GetenvInt("bar"); val != 42 { + t.Fatalf("GetenvInt returns incorrect value: %d", val) + } + if val := job.GetenvInt("nonexistent"); val != 0 { + t.Fatalf("GetenvInt returns incorrect value: %d", 
val) + } +} + +func TestSetenvList(t *testing.T) { + job := mkJob(t, "dummy") + + job.SetenvList("foo", []string{"bar"}) + if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" { + t.Fatalf("GetenvList returns incorrect value: %v", val) + } + + job.SetenvList("bar", nil) + if val := job.GetenvList("bar"); val != nil { + t.Fatalf("GetenvList returns incorrect value: %v", val) + } + if val := job.GetenvList("nonexistent"); val != nil { + t.Fatalf("GetenvList returns incorrect value: %v", val) + } +} + +func TestEnviron(t *testing.T) { + job := mkJob(t, "dummy") + job.Setenv("foo", "bar") + val, exists := job.Environ()["foo"] + if !exists { + t.Fatalf("foo not found in the environ") + } + if val != "bar" { + t.Fatalf("bar not found in the environ") + } +} + +func TestMultiMap(t *testing.T) { + e := &Env{} + e.Set("foo", "bar") + e.Set("bar", "baz") + e.Set("hello", "world") + m := e.MultiMap() + e2 := &Env{} + e2.Set("old_key", "something something something") + e2.InitMultiMap(m) + if v := e2.Get("old_key"); v != "" { + t.Fatalf("%#v", v) + } + if v := e2.Get("bar"); v != "baz" { + t.Fatalf("%#v", v) + } + if v := e2.Get("hello"); v != "world" { + t.Fatalf("%#v", v) + } +} + +func testMap(l int) [][2]string { + res := make([][2]string, l) + for i := 0; i < l; i++ { + t := [2]string{testutils.RandomString(5), testutils.RandomString(20)} + res[i] = t + } + return res +} + +func BenchmarkSet(b *testing.B) { + fix := testMap(100) + b.ResetTimer() + for i := 0; i < b.N; i++ { + env := &Env{} + for _, kv := range fix { + env.Set(kv[0], kv[1]) + } + } +} + +func BenchmarkSetJson(b *testing.B) { + fix := testMap(100) + type X struct { + f string + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + env := &Env{} + for _, kv := range fix { + if err := env.SetJson(kv[0], X{kv[1]}); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkGet(b *testing.B) { + fix := testMap(100) + env := &Env{} + for _, kv := range fix { + env.Set(kv[0], kv[1]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, kv := range fix { + env.Get(kv[0]) + } + } +} + +func BenchmarkGetJson(b *testing.B) { + fix := testMap(100) + env := &Env{} + type X struct { + f string + } + for _, kv := range fix { + env.SetJson(kv[0], X{kv[1]}) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, kv := range fix { + if err := env.GetJson(kv[0], &X{}); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkEncode(b *testing.B) { + fix := testMap(100) + env := &Env{} + type X struct { + f string + } + // half a json + for i, kv := range fix { + if i%2 != 0 { + if err := env.SetJson(kv[0], X{kv[1]}); err != nil { + b.Fatal(err) + } + continue + } + env.Set(kv[0], kv[1]) + } + var writer bytes.Buffer + b.ResetTimer() + for i := 0; i < b.N; i++ { + env.Encode(&writer) + writer.Reset() + } +} + +func BenchmarkDecode(b *testing.B) { + fix := testMap(100) + env := &Env{} + type X struct { + f string + } + // half a json + for i, kv := range fix { + if i%2 != 0 { + if err := env.SetJson(kv[0], X{kv[1]}); err != nil { + b.Fatal(err) + } + continue + } + env.Set(kv[0], kv[1]) + } + var writer bytes.Buffer + env.Encode(&writer) + denv := &Env{} + reader := bytes.NewReader(writer.Bytes()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := denv.Decode(reader) + if err != nil { + b.Fatal(err) + } + reader.Seek(0, 0) + } +} + +func TestLongNumbers(t *testing.T) { + type T struct { + TestNum int64 + } + v := T{67108864} + var buf bytes.Buffer + e := &Env{} + e.SetJson("Test", v) + if err := e.Encode(&buf); err != nil { + 
t.Fatal(err) + } + res := make(map[string]T) + if err := json.Unmarshal(buf.Bytes(), &res); err != nil { + t.Fatal(err) + } + if res["Test"].TestNum != v.TestNum { + t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) + } +} + +func TestLongNumbersArray(t *testing.T) { + type T struct { + TestNum []int64 + } + v := T{[]int64{67108864}} + var buf bytes.Buffer + e := &Env{} + e.SetJson("Test", v) + if err := e.Encode(&buf); err != nil { + t.Fatal(err) + } + res := make(map[string]T) + if err := json.Unmarshal(buf.Bytes(), &res); err != nil { + t.Fatal(err) + } + if res["Test"].TestNum[0] != v.TestNum[0] { + t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum[0], v.TestNum[0]) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/engine/hack.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/hack.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/engine/hack.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/engine/hack.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go new file mode 100644 index 0000000000..cfa11da7cd --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go @@ -0,0 +1,11 @@ +package engine + +import ( + "testing" +) + +var globalTestID string + +func mkJob(t *testing.T, name string, args ...string) *Job { + return New().Job(name, args...) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/engine/http.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/http.go new file mode 100644 index 0000000000..7e4dcd7bb4 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/http.go @@ -0,0 +1,42 @@ +package engine + +import ( + "net/http" + "path" +) + +// ServeHTTP executes a job as specified by the http request `r`, and sends the +// result as an http response. +// This method allows an Engine instance to be passed as a standard http.Handler interface. +// +// Note that the protocol used in this method is a convenience wrapper and is not the canonical +// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing, +// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response +// once data has been written to the body, which makes it inconvenient to return metadata such +// as the exit status. +// +func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var ( + jobName = path.Base(r.URL.Path) + jobArgs, exists = r.URL.Query()["a"] + ) + if !exists { + jobArgs = []string{} + } + w.Header().Set("Job-Name", jobName) + for _, arg := range jobArgs { + w.Header().Add("Job-Args", arg) + } + job := eng.Job(jobName, jobArgs...) + job.Stdout.Add(w) + job.Stderr.Add(w) + // FIXME: distinguish job status from engine error in Run() + // The former should be passed as a special header, the latter + // should cause a 500 status + w.WriteHeader(http.StatusOK) + // The exit status cannot be sent reliably with HTTP1, because headers + // can only be sent before the body.
+ // (we could possibly use http footers via chunked encoding, but I couldn't find + // how to use them in net/http) + job.Run() +} diff --git a/tests/_vendor/src/github.com/docker/docker/engine/job.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/job.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/engine/job.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/engine/job.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go new file mode 100644 index 0000000000..67e723988e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go @@ -0,0 +1,75 @@ +package engine + +import ( + "bytes" + "fmt" + "testing" +) + +func TestJobStatusOK(t *testing.T) { + eng := New() + eng.Register("return_ok", func(job *Job) Status { return StatusOK }) + err := eng.Job("return_ok").Run() + if err != nil { + t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err) + } +} + +func TestJobStatusErr(t *testing.T) { + eng := New() + eng.Register("return_err", func(job *Job) Status { return StatusErr }) + err := eng.Job("return_err").Run() + if err == nil { + t.Fatalf("When a job returns StatusErr, Run() should return an error") + } +} + +func TestJobStatusNotFound(t *testing.T) { + eng := New() + eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) + err := eng.Job("return_not_found").Run() + if err == nil { + t.Fatalf("When a job returns StatusNotFound, Run() should return an error") + } +} + +func TestJobStdoutString(t *testing.T) { + eng := New() + // FIXME: test multiple combinations of output and status + eng.Register("say_something_in_stdout", func(job *Job) Status { + job.Printf("Hello world\n") + return StatusOK + }) + + job := eng.Job("say_something_in_stdout") + var outputBuffer = bytes.NewBuffer(nil) + job.Stdout.Add(outputBuffer) + if err := job.Run(); err != nil { + t.Fatal(err) + } + fmt.Println(outputBuffer) + var output = Tail(outputBuffer, 1) + if expectedOutput := "Hello world"; output != expectedOutput { + t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) + } +} + +func TestJobStderrString(t *testing.T) { + eng := New() + // FIXME: test multiple combinations of output and status + eng.Register("say_something_in_stderr", func(job *Job) Status { + job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n") + return StatusOK + }) + + job := eng.Job("say_something_in_stderr") + var outputBuffer = bytes.NewBuffer(nil) + job.Stderr.Add(outputBuffer) + if err := job.Run(); err != nil { + t.Fatal(err) + } + var output = Tail(outputBuffer, 1) + if expectedOutput := "Something happened"; output != expectedOutput { + t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go new file mode 100644 index 0000000000..13d8049267 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go @@ -0,0 +1,80 @@ +package engine + +import ( + "testing" + "time" +) + +func TestShutdownEmpty(t *testing.T) { + eng := New() + if eng.IsShutdown() { + t.Fatalf("IsShutdown should be false") + } + eng.Shutdown() + if !eng.IsShutdown() { + t.Fatalf("IsShutdown should be true") + } +} + +func 
TestShutdownAfterRun(t *testing.T) { + eng := New() + var called bool + eng.Register("foo", func(job *Job) Status { + called = true + return StatusOK + }) + if err := eng.Job("foo").Run(); err != nil { + t.Fatal(err) + } + eng.Shutdown() + if err := eng.Job("foo").Run(); err == nil { + t.Fatalf("%#v", *eng) + } +} + +// An approximate and racy, but better-than-nothing test that verifies +// Shutdown() blocks until a job in flight has completed. +func TestShutdownDuringRun(t *testing.T) { + var ( + jobDelay time.Duration = 500 * time.Millisecond + jobDelayLow time.Duration = 100 * time.Millisecond + jobDelayHigh time.Duration = 700 * time.Millisecond + ) + eng := New() + var completed bool + eng.Register("foo", func(job *Job) Status { + time.Sleep(jobDelay) + completed = true + return StatusOK + }) + go eng.Job("foo").Run() + time.Sleep(50 * time.Millisecond) + done := make(chan struct{}) + var startShutdown time.Time + go func() { + startShutdown = time.Now() + eng.Shutdown() + close(done) + }() + time.Sleep(50 * time.Millisecond) + if err := eng.Job("foo").Run(); err == nil { + t.Fatalf("run on shutdown should fail: %#v", *eng) + } + <-done + // Verify that Shutdown() blocks for roughly 500ms, instead + // of returning almost instantly. + // + // We use >100ms to leave ample margin for race conditions between + // goroutines. It's possible (but unlikely in reasonable testing + // conditions), that this test will cause a false positive or false + // negative. But it's probably better than not having any test + // for the 99.999% of time where testing conditions are reasonable. + if d := time.Since(startShutdown); d.Nanoseconds() < jobDelayLow.Nanoseconds() { + t.Fatalf("shutdown did not block long enough: %v", d) + } else if d.Nanoseconds() > jobDelayHigh.Nanoseconds() { + t.Fatalf("shutdown blocked too long: %v", d) + } + if !completed { + t.Fatalf("job did not complete") + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/engine/streams.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/streams.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/engine/streams.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/engine/streams.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go new file mode 100644 index 0000000000..5cfd5d0e6c --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go @@ -0,0 +1,210 @@ +package engine + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" +) + +type sentinelWriteCloser struct { + calledWrite bool + calledClose bool +} + +func (w *sentinelWriteCloser) Write(p []byte) (int, error) { + w.calledWrite = true + return len(p), nil +} + +func (w *sentinelWriteCloser) Close() error { + w.calledClose = true + return nil +} + +func TestOutputAddEnv(t *testing.T) { + input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}" + o := NewOutput() + result, err := o.AddEnv() + if err != nil { + t.Fatal(err) + } + o.Write([]byte(input)) + o.Close() + if v := result.Get("foo"); v != "bar" { + t.Errorf("Expected %v, got %v", "bar", v) + } + if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 { + t.Errorf("Expected %v, got %v", 42, v) + } + if v := result.Get("this-value-doesnt-exist"); v != "" { + t.Errorf("Expected %v, got %v", "", v) + } +} + +func TestOutputAddClose(t *testing.T) { + o := NewOutput() + var s sentinelWriteCloser + o.Add(&s)
+ if err := o.Close(); err != nil { + t.Fatal(err) + } + // Write data after the output is closed. + // Write should succeed, but no destination should receive it. + if _, err := o.Write([]byte("foo bar")); err != nil { + t.Fatal(err) + } + if !s.calledClose { + t.Fatal("Output.Close() didn't close the destination") + } +} + +func TestOutputAddPipe(t *testing.T) { + var testInputs = []string{ + "hello, world!", + "One\nTwo\nThree", + "", + "A line\nThen another nl-terminated line\n", + "A line followed by an empty line\n\n", + } + for _, input := range testInputs { + expectedOutput := input + o := NewOutput() + r, err := o.AddPipe() + if err != nil { + t.Fatal(err) + } + go func(o *Output) { + if n, err := o.Write([]byte(input)); err != nil { + t.Error(err) + } else if n != len(input) { + t.Errorf("Expected %d, got %d", len(input), n) + } + if err := o.Close(); err != nil { + t.Error(err) + } + }(o) + output, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if string(output) != expectedOutput { + t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot: '%s'", expectedOutput, output) + } + } +} + +func TestTail(t *testing.T) { + var tests = make(map[string][]string) + tests["hello, world!"] = []string{ + "", + "hello, world!", + "hello, world!", + "hello, world!", + } + tests["One\nTwo\nThree"] = []string{ + "", + "Three", + "Two\nThree", + "One\nTwo\nThree", + } + for input, outputs := range tests { + for n, expectedOutput := range outputs { + output := Tail(bytes.NewBufferString(input), n) + if output != expectedOutput { + t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output) + } + } + } +} + +func lastLine(txt string) string { + scanner := bufio.NewScanner(strings.NewReader(txt)) + var lastLine string + for scanner.Scan() { + lastLine = scanner.Text() + } + return lastLine +} + +func TestOutputAdd(t *testing.T) { + o := NewOutput() + b := &bytes.Buffer{} + o.Add(b) + input := "hello, world!" 
+ if n, err := o.Write([]byte(input)); err != nil { + t.Fatal(err) + } else if n != len(input) { + t.Fatalf("Expected %d, got %d", len(input), n) + } + if output := b.String(); output != input { + t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output) + } +} + +func TestOutputWriteError(t *testing.T) { + o := NewOutput() + buf := &bytes.Buffer{} + o.Add(buf) + r, w := io.Pipe() + input := "Hello there" + expectedErr := fmt.Errorf("This is an error") + r.CloseWithError(expectedErr) + o.Add(w) + n, err := o.Write([]byte(input)) + if err != expectedErr { + t.Fatalf("Output.Write() should return the first error encountered, if any") + } + if buf.String() != input { + t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error") + } + if n != len(input) { + t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination") + } +} + +func TestInputAddEmpty(t *testing.T) { + i := NewInput() + var b bytes.Buffer + if err := i.Add(&b); err != nil { + t.Fatal(err) + } + data, err := ioutil.ReadAll(i) + if err != nil { + t.Fatal(err) + } + if len(data) > 0 { + t.Fatalf("Read from empty input should yield no data") + } +} + +func TestInputAddTwo(t *testing.T) { + i := NewInput() + var b1 bytes.Buffer + // First add should succeed + if err := i.Add(&b1); err != nil { + t.Fatal(err) + } + var b2 bytes.Buffer + // Second add should fail + if err := i.Add(&b2); err == nil { + t.Fatalf("Adding a second source should return an error") + } +} + +func TestInputAddNotEmpty(t *testing.T) { + i := NewInput() + b := bytes.NewBufferString("hello world\nabc") + expectedResult := b.String() + i.Add(b) + result, err := ioutil.ReadAll(i) + if err != nil { + t.Fatal(err) + } + if string(result) != expectedResult { + t.Fatalf("Expected: %v\nReceived: %v", expectedResult, string(result)) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/engine/table.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/table.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/engine/table.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/engine/table.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go new file mode 100644 index 0000000000..9a32ac9cdb --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go @@ -0,0 +1,112 @@ +package engine + +import ( + "bytes" + "encoding/json" + "testing" +) + +func TestTableWriteTo(t *testing.T) { + table := NewTable("", 0) + e := &Env{} + e.Set("foo", "bar") + table.Add(e) + var buf bytes.Buffer + if _, err := table.WriteTo(&buf); err != nil { + t.Fatal(err) + } + output := make(map[string]string) + if err := json.Unmarshal(buf.Bytes(), &output); err != nil { + t.Fatal(err) + } + if len(output) != 1 { + t.Fatalf("Incorrect output: %v", output) + } + if val, exists := output["foo"]; !exists || val != "bar" { + t.Fatalf("Incorrect output: %v", output) + } +} + +func TestTableSortStringValue(t *testing.T) { + table := NewTable("Key", 0) + + e := &Env{} + e.Set("Key", "A") + table.Add(e) + + e = &Env{} + e.Set("Key", "D") + table.Add(e) + + e = &Env{} + e.Set("Key", "B") + table.Add(e) + + e = &Env{} + e.Set("Key", "C") + table.Add(e) + + table.Sort() + + if len := table.Len(); len != 4 { + t.Fatalf("Expected 4, got %d", len) + } + + if value := table.Data[0].Get("Key"); value != 
"A" { + t.Fatalf("Expected A, got %s", value) + } + + if value := table.Data[1].Get("Key"); value != "B" { + t.Fatalf("Expected B, got %s", value) + } + + if value := table.Data[2].Get("Key"); value != "C" { + t.Fatalf("Expected C, got %s", value) + } + + if value := table.Data[3].Get("Key"); value != "D" { + t.Fatalf("Expected D, got %s", value) + } +} + +func TestTableReverseSortStringValue(t *testing.T) { + table := NewTable("Key", 0) + + e := &Env{} + e.Set("Key", "A") + table.Add(e) + + e = &Env{} + e.Set("Key", "D") + table.Add(e) + + e = &Env{} + e.Set("Key", "B") + table.Add(e) + + e = &Env{} + e.Set("Key", "C") + table.Add(e) + + table.ReverseSort() + + if len := table.Len(); len != 4 { + t.Fatalf("Expected 4, got %d", len) + } + + if value := table.Data[0].Get("Key"); value != "D" { + t.Fatalf("Expected D, got %s", value) + } + + if value := table.Data[1].Get("Key"); value != "C" { + t.Fatalf("Expected B, got %s", value) + } + + if value := table.Data[2].Get("Key"); value != "B" { + t.Fatalf("Expected C, got %s", value) + } + + if value := table.Data[3].Get("Key"); value != "A" { + t.Fatalf("Expected A, got %s", value) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/MAINTAINERS new file mode 100644 index 0000000000..e409454b5e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/MAINTAINERS @@ -0,0 +1,5 @@ +Solomon Hykes (@shykes) +Victor Vieux (@vieux) +Michael Crosby (@crosbymichael) +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/export.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/export.go new file mode 100644 index 0000000000..86dc5a342a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/export.go @@ -0,0 +1,168 @@ +package graph + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/parsers" +) + +// CmdImageExport exports all images with the given tag. All versions +// containing the same tag are exported. The resulting output is an +// uncompressed tar ball. +// name is the set of tags to export. +// out is the writer where the images are written to. 
+func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status { + if len(job.Args) < 1 { + return job.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name) + } + // get image json + tempdir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(tempdir) + + rootRepoMap := map[string]Repository{} + for _, name := range job.Args { + log.Debugf("Serializing %s", name) + rootRepo := s.Repositories[name] + if rootRepo != nil { + // this is a base repo name, like 'busybox' + for _, id := range rootRepo { + if _, ok := rootRepoMap[name]; !ok { + rootRepoMap[name] = rootRepo + } else { + log.Debugf("Duplicate key [%s]", name) + if rootRepoMap[name].Contains(rootRepo) { + log.Debugf("skipping, because it is present [%s:%q]", name, rootRepo) + continue + } + log.Debugf("updating [%s]: [%q] with [%q]", name, rootRepoMap[name], rootRepo) + rootRepoMap[name].Update(rootRepo) + } + + if err := s.exportImage(job.Eng, id, tempdir); err != nil { + return job.Error(err) + } + } + } else { + img, err := s.LookupImage(name) + if err != nil { + return job.Error(err) + } + + if img != nil { + // This is a named image like 'busybox:latest' + repoName, repoTag := parsers.ParseRepositoryTag(name) + + // check this length, because a lookup of a truncated hash will not have a tag + // and will not need to be added to this map + if len(repoTag) > 0 { + if _, ok := rootRepoMap[repoName]; !ok { + rootRepoMap[repoName] = Repository{repoTag: img.ID} + } else { + log.Debugf("Duplicate key [%s]", repoName) + newRepo := Repository{repoTag: img.ID} + if rootRepoMap[repoName].Contains(newRepo) { + log.Debugf("skipping, because it is present [%s:%q]", repoName, newRepo) + continue + } + log.Debugf("updating [%s]: [%q] with [%q]", repoName, rootRepoMap[repoName], newRepo) + rootRepoMap[repoName].Update(newRepo) + } + } + if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil { + return job.Error(err) + } + + } else { + // this must be an ID that didn't get looked up just right? 
+ if err := s.exportImage(job.Eng, name, tempdir); err != nil { + return job.Error(err) + } + } + } + log.Debugf("End Serializing %s", name) + } + // write repositories, if there is something to write + if len(rootRepoMap) > 0 { + rootRepoJson, _ := json.Marshal(rootRepoMap) + if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil { + return job.Error(err) + } + } else { + log.Debugf("There were no repositories to write") + } + + fs, err := archive.Tar(tempdir, archive.Uncompressed) + if err != nil { + return job.Error(err) + } + defer fs.Close() + + if _, err := io.Copy(job.Stdout, fs); err != nil { + return job.Error(err) + } + log.Debugf("End export job: %s", job.Name) + return engine.StatusOK +} + +// FIXME: this should be a top-level function, not a class method +func (s *TagStore) exportImage(eng *engine.Engine, name, tempdir string) error { + for n := name; n != ""; { + // temporary directory + tmpImageDir := path.Join(tempdir, n) + if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil { + if os.IsExist(err) { + return nil + } + return err + } + + var version = "1.0" + var versionBuf = []byte(version) + + if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil { + return err + } + + // serialize json + json, err := os.Create(path.Join(tmpImageDir, "json")) + if err != nil { + return err + } + job := eng.Job("image_inspect", n) + job.SetenvBool("raw", true) + job.Stdout.Add(json) + if err := job.Run(); err != nil { + return err + } + + // serialize filesystem + fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) + if err != nil { + return err + } + job = eng.Job("image_tarlayer", n) + job.Stdout.Add(fsTar) + if err := job.Run(); err != nil { + return err + } + + // find parent + job = eng.Job("image_get", n) + info, _ := job.Stdout.AddEnv() + if err := job.Run(); err != nil { + return err + } + n = info.Get("Parent") + } + return nil +} diff --git a/tests/_vendor/src/github.com/docker/docker/graph/graph.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/graph.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/graph/graph.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/graph/graph.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/history.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/history.go new file mode 100644 index 0000000000..2030c4c789 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/history.go @@ -0,0 +1,46 @@ +package graph + +import ( + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" +) + +func (s *TagStore) CmdHistory(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + name := job.Args[0] + foundImage, err := s.LookupImage(name) + if err != nil { + return job.Error(err) + } + + lookupMap := make(map[string][]string) + for name, repository := range s.Repositories { + for tag, id := range repository { + // If the ID already has a reverse lookup, do not update it unless for "latest" + if _, exists := lookupMap[id]; !exists { + lookupMap[id] = []string{} + } + lookupMap[id] = append(lookupMap[id], name+":"+tag) + } + } + + outs := engine.NewTable("Created", 0) + err = foundImage.WalkHistory(func(img *image.Image) error { + out := &engine.Env{} + out.Set("Id", img.ID) + out.SetInt64("Created", img.Created.Unix()) + 
out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " ")) + out.SetList("Tags", lookupMap[img.ID]) + out.SetInt64("Size", img.Size) + outs.Add(out) + return nil + }) + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/import.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/import.go new file mode 100644 index 0000000000..36d0d3fe10 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/import.go @@ -0,0 +1,61 @@ +package graph + +import ( + "net/http" + "net/url" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/utils" +) + +func (s *TagStore) CmdImport(job *engine.Job) engine.Status { + if n := len(job.Args); n != 2 && n != 3 { + return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) + } + var ( + src = job.Args[0] + repo = job.Args[1] + tag string + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + archive archive.ArchiveReader + resp *http.Response + ) + if len(job.Args) > 2 { + tag = job.Args[2] + } + + if src == "-" { + archive = job.Stdin + } else { + u, err := url.Parse(src) + if err != nil { + return job.Error(err) + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = utils.Download(u.String()) + if err != nil { + return job.Error(err) + } + progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") + defer progressReader.Close() + archive = progressReader + } + img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil) + if err != nil { + return job.Error(err) + } + // Optionally register the image at REPO/TAG + if repo != "" { + if err := s.Set(repo, tag, img.ID, true); err != nil { + return job.Error(err) + } + } + job.Stdout.Write(sf.FormatStatus("", img.ID)) + return engine.StatusOK +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/list.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/list.go new file mode 100644 index 0000000000..0e0e97e447 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/list.go @@ -0,0 +1,103 @@ +package graph + +import ( + "fmt" + "log" + "path" + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/parsers/filters" +) + +func (s *TagStore) CmdImages(job *engine.Job) engine.Status { + var ( + allImages map[string]*image.Image + err error + filt_tagged = true + ) + + imageFilters, err := filters.FromParam(job.Getenv("filters")) + if err != nil { + return job.Error(err) + } + if i, ok := imageFilters["dangling"]; ok { + for _, value := range i { + if strings.ToLower(value) == "true" { + filt_tagged = false + } + } + } + + if job.GetenvBool("all") && filt_tagged { + allImages, err = s.graph.Map() + } else { + allImages, err = s.graph.Heads() + } + if err != nil { + return job.Error(err) + } + lookup := make(map[string]*engine.Env) + s.Lock() + for name, repository := range s.Repositories { + if job.Getenv("filter") != "" { + if match, _ := path.Match(job.Getenv("filter"), name); !match { + continue + } + } + for tag, id := range repository { + image, err := s.graph.Get(id) + if err != nil { + log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err) + continue + } + + if out, exists := 
lookup[id]; exists { + if filt_tagged { + out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) + } + } else { + // this image is tagged, so drop it from the set of dangling images; + // only add it to the output when tagged images were requested + delete(allImages, id) + if filt_tagged { + out := &engine.Env{} + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + lookup[id] = out + } + } + + } + } + s.Unlock() + + outs := engine.NewTable("Created", len(lookup)) + for _, value := range lookup { + outs.Add(value) + } + + // Display images which aren't part of a repository/tag + if job.Getenv("filter") == "" { + for _, image := range allImages { + out := &engine.Env{} + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{":"}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + outs.Add(out) + } + } + + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/load.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/load.go new file mode 100644 index 0000000000..753f31d2c9 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/load.go @@ -0,0 +1,128 @@ +package graph + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" +) + +// Loads a set of images into the repository. This is the complement of ImageExport. +// The input stream is an uncompressed tarball containing images and metadata.
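+//
+// A minimal invocation sketch (hedged; the job name "load" and the engine
+// wiring are assumptions, as registration is not shown in this file):
+//
+//	in, _ := os.Open("busybox.tar")
+//	job := eng.Job("load")
+//	job.Stdin.Add(in)
+//	err := job.Run() // registers the images and tags found in the tarball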
+func (s *TagStore) CmdLoad(job *engine.Job) engine.Status { + tmpImageDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(tmpImageDir) + + var ( + repoTarFile = path.Join(tmpImageDir, "repo.tar") + repoDir = path.Join(tmpImageDir, "repo") + ) + + tarFile, err := os.Create(repoTarFile) + if err != nil { + return job.Error(err) + } + if _, err := io.Copy(tarFile, job.Stdin); err != nil { + return job.Error(err) + } + tarFile.Close() + + repoFile, err := os.Open(repoTarFile) + if err != nil { + return job.Error(err) + } + if err := os.Mkdir(repoDir, os.ModeDir); err != nil { + return job.Error(err) + } + images, err := s.graph.Map() + if err != nil { + return job.Error(err) + } + excludes := make([]string, len(images)) + i := 0 + for k := range images { + excludes[i] = k + i++ + } + if err := archive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil { + return job.Error(err) + } + + dirs, err := ioutil.ReadDir(repoDir) + if err != nil { + return job.Error(err) + } + + for _, d := range dirs { + if d.IsDir() { + if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil { + return job.Error(err) + } + } + } + + repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) + if err == nil { + repositories := map[string]Repository{} + if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { + return job.Error(err) + } + + for imageName, tagMap := range repositories { + for tag, address := range tagMap { + if err := s.Set(imageName, tag, address, true); err != nil { + return job.Error(err) + } + } + } + } else if !os.IsNotExist(err) { + return job.Error(err) + } + + return engine.StatusOK +} + +func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error { + if err := eng.Job("image_get", address).Run(); err != nil { + log.Debugf("Loading %s", address) + + imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) + if err != nil { + log.Debugf("Error reading json: %s", err) + return err + } + + layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) + if err != nil { + log.Debugf("Error reading embedded tar: %s", err) + return err + } + img, err := image.NewImgJSON(imageJson) + if err != nil { + log.Debugf("Error unmarshalling json: %s", err) + return err + } + if img.Parent != "" { + if !s.graph.Exists(img.Parent) { + if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil { + return err + } + } + } + if err := s.graph.Register(img, imageJson, layer); err != nil { + return err + } + } + log.Debugf("Completed processing %s", address) + + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/pools_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/pools_test.go new file mode 100644 index 0000000000..785a4bd122 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/pools_test.go @@ -0,0 +1,41 @@ +package graph + +import "testing" + +func TestPools(t *testing.T) { + s := &TagStore{ + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), + } + + if _, err := s.poolAdd("pull", "test1"); err != nil { + t.Fatal(err) + } + if _, err := s.poolAdd("pull", "test2"); err != nil { + t.Fatal(err) + } + if _, err := s.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + if _, err := 
s.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + if _, err := s.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } + if err := s.poolRemove("pull", "test2"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("pull", "test2"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("pull", "test1"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("push", "test1"); err != nil { + t.Fatal(err) + } + if err := s.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/pull.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/pull.go new file mode 100644 index 0000000000..5d7e84ed72 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/pull.go @@ -0,0 +1,601 @@ +package graph + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/libtrust" +) + +func (s *TagStore) verifyManifest(eng *engine.Engine, manifestBytes []byte) (*registry.ManifestData, bool, error) { + sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures") + if err != nil { + return nil, false, fmt.Errorf("error parsing payload: %s", err) + } + keys, err := sig.Verify() + if err != nil { + return nil, false, fmt.Errorf("error verifying payload: %s", err) + } + + payload, err := sig.Payload() + if err != nil { + return nil, false, fmt.Errorf("error retrieving payload: %s", err) + } + + var manifest registry.ManifestData + if err := json.Unmarshal(payload, &manifest); err != nil { + return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err) + } + if manifest.SchemaVersion != 1 { + return nil, false, fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion) + } + + var verified bool + for _, key := range keys { + job := eng.Job("trust_key_check") + b, err := key.MarshalJSON() + if err != nil { + return nil, false, fmt.Errorf("error marshalling public key: %s", err) + } + namespace := manifest.Name + if namespace[0] != '/' { + namespace = "/" + namespace + } + stdoutBuffer := bytes.NewBuffer(nil) + + job.Args = append(job.Args, namespace) + job.Setenv("PublicKey", string(b)) + // Check key has read/write permission (0x03) + job.SetenvInt("Permission", 0x03) + job.Stdout.Add(stdoutBuffer) + if err = job.Run(); err != nil { + return nil, false, fmt.Errorf("error running key check: %s", err) + } + result := engine.Tail(stdoutBuffer, 1) + log.Debugf("Key check result: %q", result) + if result == "verified" { + verified = true + } + } + + return &manifest, verified, nil +} + +func (s *TagStore) CmdPull(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 && n != 2 { + return job.Errorf("Usage: %s IMAGE [TAG]", job.Name) + } + + var ( + localName = job.Args[0] + tag string + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + authConfig = ®istry.AuthConfig{} + metaHeaders map[string][]string + mirrors []string + ) + + if len(job.Args) > 1 { + tag = job.Args[1] + } + + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", &metaHeaders) + 
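Before any network traffic, CmdPull registers itself in a single-flight pool keyed by repository (the poolAdd call just below; pools_test.go above exercises the same mechanism): the first caller gets a channel it closes when done, and concurrent callers get that channel plus an error so they can simply wait on it. A simplified sketch of the idea, not the exact TagStore implementation:

package main

import (
	"fmt"
	"sync"
)

// singleFlight sketches TagStore's pulling/pushing pools: the first add for a
// key wins; later adds receive the winner's channel along with an error.
type singleFlight struct {
	mu   sync.Mutex
	jobs map[string]chan struct{}
}

func (p *singleFlight) add(key string) (chan struct{}, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.jobs[key]; ok {
		return c, fmt.Errorf("%s is already in progress", key)
	}
	c := make(chan struct{})
	p.jobs[key] = c
	return c, nil
}

func (p *singleFlight) remove(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.jobs[key]; ok {
		close(c) // wake every waiter blocked on <-c
		delete(p.jobs, key)
	}
}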
+ c, err := s.poolAdd("pull", localName+":"+tag) + if err != nil { + if c != nil { + // Another pull of the same repository is already taking place; just wait for it to finish + job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName)) + <-c + return engine.StatusOK + } + return job.Error(err) + } + defer s.poolRemove("pull", localName+":"+tag) + + // Resolve the Repository name from fqn to endpoint + name + hostname, remoteName, err := registry.ResolveRepositoryName(localName) + if err != nil { + return job.Error(err) + } + + endpoint, err := registry.NewEndpoint(hostname) + if err != nil { + return job.Error(err) + } + + r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true) + if err != nil { + return job.Error(err) + } + + var isOfficial bool + if endpoint.VersionString(1) == registry.IndexServerAddress() { + // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar" + localName = remoteName + + isOfficial = isOfficialName(remoteName) + if isOfficial && strings.IndexRune(remoteName, '/') == -1 { + remoteName = "library/" + remoteName + } + + // Use provided mirrors, if any + mirrors = s.mirrors + } + + if isOfficial || endpoint.Version == registry.APIVersion2 { + j := job.Eng.Job("trust_update_base") + if err = j.Run(); err != nil { + return job.Errorf("error updating trust base graph: %s", err) + } + + if err := s.pullV2Repository(job.Eng, r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err == nil { + return engine.StatusOK + } else if err != registry.ErrDoesNotExist { + log.Errorf("Error from V2 registry: %s", err) + } + } + + if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel"), mirrors); err != nil { + return job.Error(err) + } + + return engine.StatusOK +} + +func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool, mirrors []string) error { + out.Write(sf.FormatStatus("", "Pulling repository %s", localName)) + + repoData, err := r.GetRepositoryData(remoteName) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + return fmt.Errorf("Error: image %s not found", remoteName) + } + // Unexpected HTTP error + return err + } + + log.Debugf("Retrieving the tag list") + tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens) + if err != nil { + log.Errorf("%v", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + log.Debugf("Registering tags") + // If no tag has been specified, pull them all + var imageId string + if askedTag == "" { + for tag, id := range tagsList { + repoData.ImgList[id].Tag = tag + } + } else { + // Otherwise, check that the tag exists and use only that one + id, exists := tagsList[askedTag] + if !exists { + return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName) + } + imageId = id + repoData.ImgList[id].Tag = askedTag + } + + errors := make(chan error) + + layers_downloaded := false + for _, image := range repoData.ImgList { + downloadImage := func(img *registry.ImgData) { + if askedTag != "" && img.Tag != askedTag { + log.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID) + if parallel { + errors <- nil + } + return + } + + if img.Tag == "" { + log.Debugf("Image (id: %s) present in this repository 
but untagged, skipping", img.ID) + if parallel { + errors <- nil + } + return + } + + // ensure no two downloads of the same image happen at the same time + if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { + if c != nil { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) + <-c + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + } else { + log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) + } + if parallel { + errors <- nil + } + return + } + defer s.poolRemove("pull", "img:"+img.ID) + + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil)) + success := false + var lastErr, err error + var is_downloaded bool + if mirrors != nil { + for _, ep := range mirrors { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, localName, ep), nil)) + if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { + // Don't report errors when pulling from mirrors. + log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, localName, ep, err) + continue + } + layers_downloaded = layers_downloaded || is_downloaded + success = true + break + } + } + if !success { + for _, ep := range repoData.Endpoints { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil)) + if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. + lastErr = err + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil)) + continue + } + layers_downloaded = layers_downloaded || is_downloaded + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, localName, lastErr) + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil)) + if parallel { + errors <- err + return + } + } + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + + if parallel { + errors <- nil + } + } + + if parallel { + go downloadImage(image) + } else { + downloadImage(image) + } + } + if parallel { + var lastError error + for i := 0; i < len(repoData.ImgList); i++ { + if err := <-errors; err != nil { + lastError = err + } + } + if lastError != nil { + return lastError + } + + } + for tag, id := range tagsList { + if askedTag != "" && id != imageId { + continue + } + if err := s.Set(localName, tag, id, true); err != nil { + return err + } + } + + requestedTag := localName + if len(askedTag) > 0 { + requestedTag = localName + ":" + askedTag + } + WriteStatus(requestedTag, out, sf, layers_downloaded) + return nil +} + +func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) { + history, err := r.GetRemoteHistory(imgID, endpoint, token) + if err != nil { + return false, err + } + out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil)) + // FIXME: Try to stream the images? 
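Both retry loops in the layer-fetching code that follows make up to five attempts, sleeping for the attempt number times 500ms between tries. Factored into a hypothetical helper, purely for illustration of the pattern used below:

package main

import "time"

// withRetries is a hypothetical helper mirroring the retry loops below:
// up to `retries` attempts with a linear backoff of attempt*500ms,
// returning the last error if every attempt fails.
func withRetries(retries int, op func() error) error {
	var err error
	for j := 1; j <= retries; j++ {
		if err = op(); err == nil {
			return nil
		}
		time.Sleep(time.Duration(j) * 500 * time.Millisecond)
	}
	return err
}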
+ // FIXME: Launch the getRemoteImage() in goroutines + + layers_downloaded := false + for i := len(history) - 1; i >= 0; i-- { + id := history[i] + + // ensure no two downloads of the same layer happen at the same time + if c, err := s.poolAdd("pull", "layer:"+id); err != nil { + log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err) + <-c + } + defer s.poolRemove("pull", "layer:"+id) + + if !s.graph.Exists(id) { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) + var ( + imgJSON []byte + imgSize int + err error + img *image.Image + ) + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return layers_downloaded, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + img, err = image.NewImgJSON(imgJSON) + layers_downloaded = true + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err) + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else { + break + } + } + + for j := 1; j <= retries; j++ { + // Get the layer + status := "Pulling fs layer" + if j > 1 { + status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) + } + out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil)) + layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize)) + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return layers_downloaded, err + } + layers_downloaded = true + defer layer.Close() + + err = s.graph.Register(img, imgJSON, + utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading")) + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) + return layers_downloaded, err + } else { + break + } + } + } + out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) + } + return layers_downloaded, nil +} + +func WriteStatus(requestedTag string, out io.Writer, sf *utils.StreamFormatter, layers_downloaded bool) { + if layers_downloaded { + out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag)) + } else { + out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag)) + } +} + +// downloadInfo is used to pass information from download to extractor +type downloadInfo struct { + imgJSON []byte + img *image.Image + tmpFile *os.File + length int64 + downloaded bool + err chan error +} + +func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) error { + var layersDownloaded bool + if tag == "" { + log.Debugf("Pulling tag list from V2 registry for %s", remoteName) + tags, err := r.GetV2RemoteTags(remoteName, nil) + if err != nil { + return err + } + for _, t 
:= range tags { + if downloaded, err := s.pullV2Tag(eng, r, out, localName, remoteName, t, sf, parallel); err != nil { + return err + } else if downloaded { + layersDownloaded = true + } + } + } else { + if downloaded, err := s.pullV2Tag(eng, r, out, localName, remoteName, tag, sf, parallel); err != nil { + return err + } else if downloaded { + layersDownloaded = true + } + } + + requestedTag := localName + if len(tag) > 0 { + requestedTag = localName + ":" + tag + } + WriteStatus(requestedTag, out, sf, layersDownloaded) + return nil +} + +func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) (bool, error) { + log.Debugf("Pulling tag from V2 registry: %q", tag) + manifestBytes, err := r.GetV2ImageManifest(remoteName, tag, nil) + if err != nil { + return false, err + } + + manifest, verified, err := s.verifyManifest(eng, manifestBytes) + if err != nil { + return false, fmt.Errorf("error verifying manifest: %s", err) + } + + if len(manifest.FSLayers) != len(manifest.History) { + return false, fmt.Errorf("length of history not equal to number of layers") + } + + if verified { + out.Write(sf.FormatStatus(localName+":"+tag, "The image you are pulling has been verified")) + } else { + out.Write(sf.FormatStatus(tag, "Pulling from %s", localName)) + } + + if len(manifest.FSLayers) == 0 { + return false, fmt.Errorf("no blobSums in manifest") + } + + downloads := make([]downloadInfo, len(manifest.FSLayers)) + + for i := len(manifest.FSLayers) - 1; i >= 0; i-- { + var ( + sumStr = manifest.FSLayers[i].BlobSum + imgJSON = []byte(manifest.History[i].V1Compatibility) + ) + + img, err := image.NewImgJSON(imgJSON) + if err != nil { + return false, fmt.Errorf("failed to parse json: %s", err) + } + downloads[i].img = img + + // Check if exists + if s.graph.Exists(img.ID) { + log.Debugf("Image already exists: %s", img.ID) + continue + } + + chunks := strings.SplitN(sumStr, ":", 2) + if len(chunks) < 2 { + return false, fmt.Errorf("expected 2 parts in the sumStr, got %#v", chunks) + } + sumType, checksum := chunks[0], chunks[1] + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling fs layer", nil)) + + downloadFunc := func(di *downloadInfo) error { + log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID) + + if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { + if c != nil { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. 
Waiting.", nil)) + <-c + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + } else { + log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) + } + } else { + defer s.poolRemove("pull", "img:"+img.ID) + tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob") + if err != nil { + return err + } + + r, l, err := r.GetV2ImageBlobReader(remoteName, sumType, checksum, nil) + if err != nil { + return err + } + defer r.Close() + io.Copy(tmpFile, utils.ProgressReader(r, int(l), out, sf, false, utils.TruncateID(img.ID), "Downloading")) + + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + + log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name()) + di.tmpFile = tmpFile + di.length = l + di.downloaded = true + } + di.imgJSON = imgJSON + + return nil + } + + if parallel { + downloads[i].err = make(chan error) + go func(di *downloadInfo) { + di.err <- downloadFunc(di) + }(&downloads[i]) + } else { + err := downloadFunc(&downloads[i]) + if err != nil { + return false, err + } + } + } + + var layersDownloaded bool + for i := len(downloads) - 1; i >= 0; i-- { + d := &downloads[i] + if d.err != nil { + err := <-d.err + if err != nil { + return false, err + } + } + if d.downloaded { + // if tmpFile is empty assume download and extracted elsewhere + defer os.Remove(d.tmpFile.Name()) + defer d.tmpFile.Close() + d.tmpFile.Seek(0, 0) + if d.tmpFile != nil { + err = s.graph.Register(d.img, d.imgJSON, + utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting")) + if err != nil { + return false, err + } + + // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted) + } + out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Pull complete", nil)) + layersDownloaded = true + } else { + out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Already exists", nil)) + } + + } + + if err = s.Set(localName, tag, downloads[0].img.ID, true); err != nil { + return false, err + } + + return layersDownloaded, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/push.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/push.go new file mode 100644 index 0000000000..3511245b30 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/push.go @@ -0,0 +1,250 @@ +package graph + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" +) + +// Retrieve the all the images to be uploaded in the correct order +func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) { + var ( + imageList []string + imagesSeen = make(map[string]bool) + tagsByImage = make(map[string][]string) + ) + + for tag, id := range localRepo { + if requestedTag != "" && requestedTag != tag { + continue + } + var imageListForThisTag []string + + tagsByImage[id] = append(tagsByImage[id], tag) + + for img, err := s.graph.Get(id); img != nil; img, err = img.GetParent() { + if err != nil { + return nil, nil, err + } + + if imagesSeen[img.ID] { + // This image is already on the list, we can ignore it and all its parents + break + } + + imagesSeen[img.ID] = true + imageListForThisTag = append(imageListForThisTag, img.ID) + } + + // reverse the image list for this tag (so the 
"most"-parent image is first) + for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 { + imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + log.Debugf("Image list: %v", imageList) + log.Debugf("Tags by image: %v", tagsByImage) + + return imageList, tagsByImage, nil +} + +func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error { + out = utils.NewWriteFlusher(out) + log.Debugf("Local repo: %s", localRepo) + imgList, tagsByImage, err := s.getImageList(localRepo, tag) + if err != nil { + return err + } + + out.Write(sf.FormatStatus("", "Sending image list")) + + var ( + repoData *registry.RepositoryData + imageIndex []*registry.ImgData + ) + + for _, imgId := range imgList { + if tags, exists := tagsByImage[imgId]; exists { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: imgId, + Tag: tag, + }) + } + } else { + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is accociated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: imgId, + Tag: "", + }) + + } + } + + log.Debugf("Preparing to push %s with the following images and tags", localRepo) + for _, data := range imageIndex { + log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil) + if err != nil { + return err + } + + nTag := 1 + if tag == "" { + nTag = len(localRepo) + } + for _, ep := range repoData.Endpoints { + out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag)) + + for _, imgId := range imgList { + if r.LookupRemoteImage(imgId, ep, repoData.Tokens) { + out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId))) + } else { + if _, err := s.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil { + // FIXME: Continue on error? 
+ return err + } + } + + for _, tag := range tagsByImage[imgId] { + out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag)) + + if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil { + return err + } + } + } + } + + if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil { + return err + } + + return nil +} + +func (s *TagStore) pushImage(r *registry.Session, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) { + out = utils.NewWriteFlusher(out) + jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json")) + if err != nil { + return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) + } + out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil)) + + imgData := ®istry.ImgData{ + ID: imgID, + } + + // Send the json + if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil { + if err == registry.ErrAlreadyExists { + out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) + return "", nil + } + return "", err + } + + layerData, err := s.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out) + if err != nil { + return "", fmt.Errorf("Failed to generate layer archive: %s", err) + } + defer os.RemoveAll(layerData.Name()) + + // Send the layer + log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size) + + checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil { + return "", err + } + + out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil)) + return imgData.Checksum, nil +} + +// FIXME: Allow to interrupt current push when new push of same image is done. 
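Worth noting: getImageList above guarantees that layers are pushed parents-first, because each tag's ancestry chain is collected child-to-parent and then reversed before being appended. A hypothetical standalone sketch of that ordering:

// pushOrder sketches the parent-first ordering getImageList computes above:
// walk the chain child -> parent, then reverse it so the most distant
// ancestor comes first. parentOf is a hypothetical child->parent map.
func pushOrder(id string, parentOf map[string]string) []string {
	var chain []string
	for cur := id; cur != ""; cur = parentOf[cur] {
		chain = append(chain, cur)
	}
	for i, j := 0, len(chain)-1; i < j; i, j = i+1, j-1 {
		chain[i], chain[j] = chain[j], chain[i]
	}
	return chain
}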
+func (s *TagStore) CmdPush(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + var ( + localName = job.Args[0] + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + authConfig = ®istry.AuthConfig{} + metaHeaders map[string][]string + ) + + tag := job.Getenv("tag") + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", &metaHeaders) + if _, err := s.poolAdd("push", localName); err != nil { + return job.Error(err) + } + defer s.poolRemove("push", localName) + + // Resolve the Repository name from fqn to endpoint + name + hostname, remoteName, err := registry.ResolveRepositoryName(localName) + if err != nil { + return job.Error(err) + } + + endpoint, err := registry.NewEndpoint(hostname) + if err != nil { + return job.Error(err) + } + + img, err := s.graph.Get(localName) + r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false) + if err2 != nil { + return job.Error(err2) + } + + if err != nil { + reposLen := 1 + if tag == "" { + reposLen = len(s.Repositories[localName]) + } + job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) + // If it fails, try to get the repository + if localRepo, exists := s.Repositories[localName]; exists { + if err := s.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Error(err) + } + + var token []string + job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName)) + if _, err := s.pushImage(r, job.Stdout, remoteName, img.ID, endpoint.String(), token, sf); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/service.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/service.go new file mode 100644 index 0000000000..1be986f8d5 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/service.go @@ -0,0 +1,182 @@ +package graph + +import ( + "fmt" + "io" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/log" +) + +func (s *TagStore) Install(eng *engine.Engine) error { + for name, handler := range map[string]engine.Handler{ + "image_set": s.CmdSet, + "image_tag": s.CmdTag, + "tag": s.CmdTagLegacy, // FIXME merge with "image_tag" + "image_get": s.CmdGet, + "image_inspect": s.CmdLookup, + "image_tarlayer": s.CmdTarLayer, + "image_export": s.CmdImageExport, + "history": s.CmdHistory, + "images": s.CmdImages, + "viz": s.CmdViz, + "load": s.CmdLoad, + "import": s.CmdImport, + "pull": s.CmdPull, + "push": s.CmdPush, + } { + if err := eng.Register(name, handler); err != nil { + return fmt.Errorf("Could not register %q: %v", name, err) + } + } + return nil +} + +// CmdSet stores a new image in the graph. +// Images are stored in the graph using 4 elements: +// - A user-defined ID +// - A collection of metadata describing the image +// - A directory tree stored as a tar archive (also called the "layer") +// - A reference to a "parent" ID on top of which the layer should be applied +// +// NOTE: even though the parent ID is only useful in relation to the layer and how +// to apply it (ie you could represent the full directory tree as 'parent_layer + layer', +// it is treated as a top-level property of the image. 
This is an artifact of early +design and should probably be cleaned up in the future to simplify the design. + // + // Syntax: image_set ID + // Input: + // - Layer content must be streamed in tar format on stdin. An empty input is + // valid and represents a nil layer. + // + // - Image metadata must be passed in the command environment. + // 'json': a json-encoded object with all image metadata. + // It will be stored as-is, without any encoding/decoding artifacts. + // That is a requirement of the current registry client implementation, + // because a re-encoded json might invalidate the image checksum at + // the next upload, even with functionally identical content. +func (s *TagStore) CmdSet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + var ( + imgJSON = []byte(job.Getenv("json")) + layer = job.Stdin + ) + if len(imgJSON) == 0 { + return job.Errorf("mandatory key 'json' is not set") + } + // We have to pass an *image.Image object, even though it will be completely + // ignored in favor of the redundant json data. + // FIXME: the current prototype of Graph.Register is stupid and redundant. + img, err := image.NewImgJSON(imgJSON) + if err != nil { + return job.Error(err) + } + if err := s.graph.Register(img, imgJSON, layer); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// CmdGet returns information about an image. +// If the image doesn't exist, an empty object is returned, to allow +// checking for an image's existence. +func (s *TagStore) CmdGet(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + res := &engine.Env{} + img, err := s.LookupImage(name) + // Note: if the image doesn't exist, LookupImage returns + // nil, nil. + if err != nil { + return job.Error(err) + } + if img != nil { + // We don't directly expose all fields of the Image objects, + // to maintain a clean public API which we can maintain over + // time even if the underlying structure changes. + // We should have done this with the Image object to begin with... + // but we didn't, so now we're doing it here. + // + // Fields that we're probably better off not including: + // - Config/ContainerConfig. Those structs have the same sprawl problem, + // so we shouldn't include them wholesale either. + // - Comment: initially created to fulfill the "every image is a git commit" + // metaphor; in practice people either ignore it or use it as a + // generic description field which it isn't. On deprecation shortlist.
+ res.SetAuto("Created", img.Created) + res.Set("Author", img.Author) + res.Set("Os", img.OS) + res.Set("Architecture", img.Architecture) + res.Set("DockerVersion", img.DockerVersion) + res.Set("Id", img.ID) + res.Set("Parent", img.Parent) + } + res.WriteTo(job.Stdout) + return engine.StatusOK +} + +// CmdLookup returns an image encoded in JSON +func (s *TagStore) CmdLookup(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + if job.GetenvBool("raw") { + b, err := image.RawJson() + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK + } + + out := &engine.Env{} + out.Set("Id", image.ID) + out.Set("Parent", image.Parent) + out.Set("Comment", image.Comment) + out.SetAuto("Created", image.Created) + out.Set("Container", image.Container) + out.SetJson("ContainerConfig", image.ContainerConfig) + out.Set("DockerVersion", image.DockerVersion) + out.Set("Author", image.Author) + out.SetJson("Config", image.Config) + out.Set("Architecture", image.Architecture) + out.Set("Os", image.OS) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + if _, err = out.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +} + +// CmdTarLayer returns the tarLayer of the image +func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s NAME", job.Name) + } + name := job.Args[0] + if image, err := s.LookupImage(name); err == nil && image != nil { + fs, err := image.TarLayer() + if err != nil { + return job.Error(err) + } + defer fs.Close() + + written, err := io.Copy(job.Stdout, fs) + if err != nil { + return job.Error(err) + } + log.Debugf("rendered layer for %s of [%d] size", image.ID, written) + return engine.StatusOK + } + return job.Errorf("No such image: %s", name) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/tag.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/tag.go new file mode 100644 index 0000000000..3d89422f9d --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/tag.go @@ -0,0 +1,44 @@ +package graph + +import ( + "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/parsers" +) + +// CmdTag assigns a new name and tag to an existing image. If the tag already exists, +// it is changed and the image previously referenced by the tag loses that reference. +// This may cause the old image to be garbage-collected if its reference count reaches zero. +// +// Syntax: image_tag NEWNAME OLDNAME +// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0 +func (s *TagStore) CmdTag(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name) + } + var ( + newName = job.Args[0] + oldName = job.Args[1] + ) + newRepo, newTag := parsers.ParseRepositoryTag(newName) + // FIXME: Set should either parse both old and new name, or neither. + // the current prototype is inconsistent. + if err := s.Set(newRepo, newTag, oldName, true); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job.
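For reference, the split CmdTag performs on NEWNAME comes from parsers.ParseRepositoryTag; the results shown in the comments below are assumptions inferred from how the function is used in this tree:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers"
)

func main() {
	repo, tag := parsers.ParseRepositoryTag("shykes/myapp:1.42.0")
	fmt.Println(repo, tag) // assumed: "shykes/myapp" "1.42.0"
	repo, tag = parsers.ParseRepositoryTag("shykes/myapp")
	fmt.Println(repo, tag) // assumed: "shykes/myapp" "" (no tag supplied)
}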
+func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status { + if len(job.Args) != 2 && len(job.Args) != 3 { + return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) + } + var tag string + if len(job.Args) == 3 { + tag = job.Args[2] + } + if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/tests/_vendor/src/github.com/docker/docker/graph/tags.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/tags.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/graph/tags.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/graph/tags.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/tags_unit_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/tags_unit_test.go new file mode 100644 index 0000000000..e4f1fb809f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/tags_unit_test.go @@ -0,0 +1,150 @@ +package graph + +import ( + "bytes" + "io" + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + _ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests + "github.com/docker/docker/image" + "github.com/docker/docker/utils" + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +const ( + testImageName = "myapp" + testImageID = "foo" +) + +func fakeTar() (io.Reader, error) { + uid := os.Getuid() + gid := os.Getgid() + + content := []byte("Hello world!\n") + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { + hdr := new(tar.Header) + + // Leaving these fields blank requires root privileges + hdr.Uid = uid + hdr.Gid = gid + + hdr.Size = int64(len(content)) + hdr.Name = name + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + tw.Write([]byte(content)) + } + tw.Close() + return buf, nil +} + +func mkTestTagStore(root string, t *testing.T) *TagStore { + driver, err := graphdriver.New(root, nil) + if err != nil { + t.Fatal(err) + } + graph, err := NewGraph(root, driver) + if err != nil { + t.Fatal(err) + } + store, err := NewTagStore(path.Join(root, "tags"), graph, nil) + if err != nil { + t.Fatal(err) + } + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img := &image.Image{ID: testImageID} + if err := graph.Register(img, nil, archive); err != nil { + t.Fatal(err) + } + if err := store.Set(testImageName, "", testImageID, false); err != nil { + t.Fatal(err) + } + return store +} + +func TestLookupImage(t *testing.T) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + store := mkTestTagStore(tmp, t) + defer store.graph.driver.Cleanup() + + if img, err := store.LookupImage(testImageName); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage("fail:fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + 
t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage(testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } +} + +func TestValidTagName(t *testing.T) { + validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"} + for _, tag := range validTags { + if err := ValidateTagName(tag); err != nil { + t.Errorf("'%s' should've been a valid tag", tag) + } + } +} + +func TestInvalidTagName(t *testing.T) { + validTags := []string{"-9", ".foo", "-test", ".", "-"} + for _, tag := range validTags { + if err := ValidateTagName(tag); err == nil { + t.Errorf("'%s' shouldn't have been a valid tag", tag) + } + } +} + +func TestOfficialName(t *testing.T) { + names := map[string]bool{ + "library/ubuntu": true, + "nonlibrary/ubuntu": false, + "ubuntu": true, + "other/library": false, + } + for name, isOfficial := range names { + result := isOfficialName(name) + if result != isOfficial { + t.Errorf("Unexpected result for %s\n\tExpecting: %v\n\tActual: %v", name, isOfficial, result) + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/graph/viz.go b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/viz.go new file mode 100644 index 0000000000..924c22b6a2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/graph/viz.go @@ -0,0 +1,38 @@ +package graph + +import ( + "strings" + + "github.com/docker/docker/engine" + "github.com/docker/docker/image" +) + +func (s *TagStore) CmdViz(job *engine.Job) engine.Status { + images, _ := s.graph.Map() + if images == nil { + return engine.StatusOK + } + job.Stdout.Write([]byte("digraph docker {\n")) + + var ( + parentImage *image.Image + err error + ) + for _, image := range images { + parentImage, err = image.GetParent() + if err != nil { + return job.Errorf("Error while getting parent image: %v", err) + } + if parentImage != nil { + job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) + } else { + job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) + } + } + + for id, repos := range s.GetRepoRefs() { + job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) + } + job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) + return engine.StatusOK +} diff --git a/tests/_vendor/src/github.com/docker/docker/image/graph.go b/tests/Godeps/_workspace/src/github.com/docker/docker/image/graph.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/image/graph.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/image/graph.go diff --git a/tests/_vendor/src/github.com/docker/docker/image/image.go b/tests/Godeps/_workspace/src/github.com/docker/docker/image/image.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/image/image.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/image/image.go diff --git a/tests/_vendor/src/github.com/docker/docker/nat/nat.go b/tests/Godeps/_workspace/src/github.com/docker/docker/nat/nat.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/nat/nat.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/nat/nat.go diff --git 
a/tests/Godeps/_workspace/src/github.com/docker/docker/nat/nat_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/nat/nat_test.go new file mode 100644 index 0000000000..a8c2cb584e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/nat/nat_test.go @@ -0,0 +1,201 @@ +package nat + +import ( + "testing" +) + +func TestParsePort(t *testing.T) { + var ( + p int + err error + ) + + p, err = ParsePort("1234") + + if err != nil || p != 1234 { + t.Fatal("Parsing '1234' did not succeed") + } + + // FIXME currently this is a valid port. I don't think it should be. + // I'm leaving this test commented out until we make a decision. + // - erikh + + /* + p, err = ParsePort("0123") + + if err != nil { + t.Fatal("Successfully parsed port '0123' to '123'") + } + */ + + p, err = ParsePort("asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port 'asdf' succeeded") + } + + p, err = ParsePort("1asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port '1asdf' succeeded") + } +} + +func TestPort(t *testing.T) { + p := NewPort("tcp", "1234") + + if string(p) != "1234/tcp" { + t.Fatal("tcp, 1234 did not result in the string 1234/tcp") + } + + if p.Proto() != "tcp" { + t.Fatal("protocol was not tcp") + } + + if p.Port() != "1234" { + t.Fatal("port string value was not 1234") + } + + if p.Int() != 1234 { + t.Fatal("port int value was not 1234") + } +} + +func TestSplitProtoPort(t *testing.T) { + var ( + proto string + port string + ) + + proto, port = SplitProtoPort("1234/tcp") + + if proto != "tcp" || port != "1234" { + t.Fatal("Could not split 1234/tcp properly") + } + + proto, port = SplitProtoPort("") + + if proto != "" || port != "" { + t.Fatal("parsing an empty string yielded surprising results") + } + + proto, port = SplitProtoPort("1234") + + if proto != "tcp" || port != "1234" { + t.Fatal("tcp is not the default protocol for portspec '1234'") + } + + proto, port = SplitProtoPort("1234/") + + if proto != "tcp" || port != "1234" { + t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) + } + + proto, port = SplitProtoPort("/tcp") + + if proto != "" || port != "" { + t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) + } +} + +func TestParsePortSpecs(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error()) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIp != "" { + t.Fatalf("HostIp should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error()) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should 
have exactly one binding", portspec) + } + + if bindings[0].HostIp != "" { + t.Fatalf("HostIp should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error()) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIp != "0.0.0.0" { + t.Fatalf("HostIp is not 0.0.0.0 for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/nat/sort.go b/tests/Godeps/_workspace/src/github.com/docker/docker/nat/sort.go new file mode 100644 index 0000000000..f36c12f7bb --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/nat/sort.go @@ -0,0 +1,28 @@ +package nat + +import "sort" + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/nat/sort_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/nat/sort_test.go new file mode 100644 index 0000000000..5d490e321b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/nat/sort_test.go @@ -0,0 +1,41 @@ +package nat + +import ( + "fmt" + "testing" +) + +func TestSortUniquePorts(t *testing.T) { + ports := []Port{ + Port("6379/tcp"), + Port("22/tcp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "22/tcp" { + t.Log(fmt.Sprint(first)) + t.Fail() + } +} + +func TestSortSamePortWithDifferentProto(t *testing.T) { + ports := []Port{ + Port("8888/tcp"), + Port("8888/udp"), + Port("6379/tcp"), + Port("6379/udp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "6379/tcp" { + t.Fail() + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/opts/envfile.go b/tests/Godeps/_workspace/src/github.com/docker/docker/opts/envfile.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/opts/envfile.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/opts/envfile.go diff --git a/tests/_vendor/src/github.com/docker/docker/opts/ip.go b/tests/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go similarity index 100% rename from 
tests/_vendor/src/github.com/docker/docker/opts/ip.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/opts/ip.go diff --git a/tests/_vendor/src/github.com/docker/docker/opts/opts.go b/tests/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/opts/opts.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/opts/opts_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/opts/opts_test.go new file mode 100644 index 0000000000..09b5aa780b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/opts/opts_test.go @@ -0,0 +1,90 @@ +package opts + +import ( + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestListOpts(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + o.String() +} + +func TestValidateDnsSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + } + + for _, domain := range valid { + if ret, err := ValidateDnsSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDnsSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS new file mode 100644 index 0000000000..2aac7265d2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS @@ -0,0 +1,2 @@ +Cristian Staretu (@unclejack) +Tibor Vass (@tiborvass) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 0000000000..7307d9694f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. 
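Before the tests that follow, a quick usage sketch of the package's central helpers, TarWithOptions and Untar, mirroring how archive_test.go below drives them; the paths here are placeholders:

package main

import "github.com/docker/docker/pkg/archive"

func main() {
	// Tar a directory, excluding an entry (as tarUntar in the tests below does).
	stream, err := archive.TarWithOptions("/tmp/source", &archive.TarOptions{
		Compression: archive.Uncompressed,
		Excludes:    []string{"3"},
	})
	if err != nil {
		panic(err)
	}
	defer stream.Close()

	// Unpack the stream elsewhere; nil selects the default options.
	if err := archive.Untar(stream, "/tmp/dest", nil); err != nil {
		panic(err)
	}
}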
diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/archive/archive.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/archive/archive.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go new file mode 100644 index 0000000000..b46f953228 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go @@ -0,0 +1,244 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "testing" + "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func TestCmdStreamLargeStderr(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") + out, err := CmdStream(cmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + errCh := make(chan error) + go func() { + _, err := io.Copy(ioutil.Discard, out) + errCh <- err + }() + select { + case err := <-errCh: + if err != nil { + t.Fatalf("Command should not have failed (err=%.100s...)", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("Command did not complete in 5 seconds; probable deadlock") + } +} + +func TestCmdStreamBad(t *testing.T) { + badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") + out, err := CmdStream(badCmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + if output, err := ioutil.ReadAll(out); err == nil { + t.Fatalf("Command should have failed") + } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { + t.Fatalf("Wrong error value (%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestCmdStreamGood(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") + out, err := CmdStream(cmd, nil) + if err != nil { + t.Fatal(err) + } + if output, err := ioutil.ReadAll(out); err != nil { + t.Fatalf("Command should not have failed (err=%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) + } + + tmp, err := ioutil.TempDir("", "docker-test-untar") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmp) + if err := Untar(wrap, tmp, nil); err != nil { + return nil, err + } + if _, err := os.Stat(tmp); err != nil { + return nil, err + } + + return ChangesDirs(origin, tmp) +} + +func TestTarUntar(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + Excludes: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + } +} + +func TestTarWithOptions(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + + cases := []struct { + opts *TarOptions + numChanges int + }{ + {&TarOptions{Includes: []string{"1"}}, 1}, + {&TarOptions{Excludes: []string{"2"}}, 1}, + } + for _, testCase := range cases { + changes, err := tarUntar(t, origin, testCase.opts) + if err != nil { + t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. +// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. 
+ for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := path.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.SetBytes(int64(n)) + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/archive/changes.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/archive/changes.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go new file mode 100644 index 0000000000..34c0f0da64 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go @@ -0,0 +1,301 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "sort" + "testing" + "time" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +// Helper to sort []Change by path +type byPath struct{ changes []Change } + +func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path } +func (b byPath) Len() int { return len(b.changes) } +func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] } + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + 
{Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + {Regular, "dir4/file3-2", "file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, atime for all files and dirs + if err := os.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +// Create an directory, copy it, make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil 
{ + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(byPath{changes}) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dir3", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/archive/diff.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/archive/diff.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar new file mode 100644 index 0000000000..8f10ea6b87 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar differ diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/archive/time_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/archive/time_linux.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go diff --git
a/tests/_vendor/src/github.com/docker/docker/pkg/archive/time_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/archive/time_unsupported.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/archive/wrap.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/archive/wrap.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/fileutils/fileutils.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/MAINTAINERS new file mode 100644 index 0000000000..6dde4769d7 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/MAINTAINERS @@ -0,0 +1 @@ +Cristian Staretu (@unclejack) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go new file mode 100644 index 0000000000..3cd1f49179 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,93 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/docker/docker/pkg/log" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil 
+ } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + log.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/ioutils/readers.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/ioutils/readers.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go new file mode 100644 index 0000000000..a7a2dad176 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go @@ -0,0 +1,34 @@ +package ioutils + +import ( + "bytes" + "io" + "io/ioutil" + "testing" +) + +func TestBufReader(t *testing.T) { + reader, writer := io.Pipe() + bufreader := NewBufReader(reader) + + // Write everything down to a Pipe + // Usually, a pipe should block but because of the buffered reader, + // the writes will go through + done := make(chan bool) + go func() { + writer.Write([]byte("hello world")) + writer.Close() + done <- true + }() + + // Drain the reader *after* everything has been written, just to verify + // it is indeed buffering + <-done + output, err := ioutil.ReadAll(bufreader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(output, []byte("hello world")) { + t.Error(string(output)) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/ioutils/writers.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/ioutils/writers.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/log/log.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/log/log.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/log/log.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/log/log.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/log/log_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/log/log_test.go new file mode 100644 index 0000000000..83ba5fd27c --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/log/log_test.go @@ -0,0 +1,37 @@ +package log + +import ( + "bytes" + 
"regexp" + + "testing" +) + +func TestLogFatalf(t *testing.T) { + var output *bytes.Buffer + + tests := []struct { + Level priority + Format string + Values []interface{} + ExpectedPattern string + }{ + {fatal, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {error, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {info, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[info\\] 1 \\+ 1 = 2"}, + {debug, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + } + + for i, test := range tests { + output = &bytes.Buffer{} + logf(output, test.Level, test.Format, test.Values...) + + expected := regexp.MustCompile(test.ExpectedPattern) + if !expected.MatchString(output.String()) { + t.Errorf("[%d] Log output does not match expected pattern:\n\tExpected: %s\n\tOutput: %s", + i, + expected.String(), + output.String()) + } + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mflag/LICENSE b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mflag/LICENSE rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/MAINTAINERS new file mode 100644 index 0000000000..e0f18f14f1 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/MAINTAINERS @@ -0,0 +1 @@ +Victor Vieux (@vieux) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md new file mode 100644 index 0000000000..da00efa336 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/README.md @@ -0,0 +1,40 @@ +Package mflag (aka multiple-flag) implements command-line flag parsing. +It's an **hacky** fork of the [official golang package](http://golang.org/pkg/flag/) + +It adds: + +* both short and long flag version +`./example -s red` `./example --string blue` + +* multiple names for the same option +``` +$>./example -h +Usage of example: + -s, --string="": a simple string +``` + +___ +It is very flexible on purpose, so you can do things like: +``` +$>./example -h +Usage of example: + -s, -string, --string="": a simple string +``` + +Or: +``` +$>./example -h +Usage of example: + -oldflag, --newflag="": a simple string +``` + +You can also hide some flags from the usage, so if we want only `--newflag`: +``` +$>./example -h +Usage of example: + --newflag="": a simple string +$>./example -oldflag str +str +``` + +See [example.go](example/example.go) for more details. 
diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go new file mode 100644 index 0000000000..2e766dd1e5 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + i int + str string + b, b2, h bool +) + +func init() { + flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp") + flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool") + flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool") + flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool") + flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") + flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage + flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") + flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3") + flag.Parse() +} +func main() { + if h { + flag.PrintDefaults() + } else { + fmt.Printf("s/#hidden/-string: %s\n", str) + fmt.Printf("b: %t\n", b) + fmt.Printf("-bool: %t\n", b2) + fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) + fmt.Printf("ARGS: %v\n", flag.Args()) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mflag/flag.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mflag/flag.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag_test.go new file mode 100644 index 0000000000..340a1cb175 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag_test.go @@ -0,0 +1,506 @@ +// Copyright 2014 The Docker & Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mflag_test + +import ( + "bytes" + "fmt" + . "github.com/docker/docker/pkg/mflag" + "os" + "sort" + "strings" + "testing" + "time" +) + +// ResetForTesting clears all flag state and sets the usage function as directed. +// After calling ResetForTesting, parse errors in flag handling will not +// exit the program. 
+func ResetForTesting(usage func()) { + CommandLine = NewFlagSet(os.Args[0], ContinueOnError) + Usage = usage +} +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, false, "bool value") + Int([]string{"test_int"}, 0, "int value") + Int64([]string{"test_int64"}, 0, "int64 value") + Uint([]string{"test_uint"}, 0, "uint value") + Uint64([]string{"test_uint64"}, 0, "uint64 value") + String([]string{"test_string"}, "0", "string value") + Float64([]string{"test_float64"}, 0, "float64 value") + Duration([]string{"test_duration"}, 0, "time.Duration value") + + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + m[name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", name) + } + } + } + } + VisitAll(visitor) + if len(m) != 8 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + desired = "1" + Visit(visitor) + if len(m) != 8 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
+ var flagNames []string + Visit(func(f *Flag) { + for _, name := range f.Names { + flagNames = append(flagNames, name) + } + }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestGet(t *testing.T) { + ResetForTesting(nil) + Bool([]string{"test_bool"}, true, "bool value") + Int([]string{"test_int"}, 1, "int value") + Int64([]string{"test_int64"}, 2, "int64 value") + Uint([]string{"test_uint"}, 3, "uint value") + Uint64([]string{"test_uint64"}, 4, "uint64 value") + String([]string{"test_string"}, "5", "string value") + Float64([]string{"test_float64"}, 6, "float64 value") + Duration([]string{"test_duration"}, 7, "time.Duration value") + + visitor := func(f *Flag) { + for _, name := range f.Names { + if len(name) > 5 && name[0:5] == "test_" { + g, ok := f.Value.(Getter) + if !ok { + t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) + return + } + switch name { + case "test_bool": + ok = g.Get() == true + case "test_int": + ok = g.Get() == int(1) + case "test_int64": + ok = g.Get() == int64(2) + case "test_uint": + ok = g.Get() == uint(3) + case "test_uint64": + ok = g.Get() == uint64(4) + case "test_string": + ok = g.Get() == "5" + case "test_float64": + ok = g.Get() == float64(6) + case "test_duration": + ok = g.Get() == time.Duration(7) + } + if !ok { + t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) + } + } + } + } + VisitAll(visitor) +} + +func TestUsage(t *testing.T) { + called := false + ResetForTesting(func() { called = true }) + if CommandLine.Parse([]string{"-x"}) == nil { + t.Error("parse did not fail for unknown flag") + } + if !called { + t.Error("did not call Usage for unknown flag") + } +} + +func testParse(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolFlag := f.Bool([]string{"bool"}, false, "bool value") + bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") + intFlag := f.Int([]string{"-int"}, 0, "int value") + int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") + uintFlag := f.Uint([]string{"uint"}, 0, "uint value") + uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") + stringFlag := f.String([]string{"string"}, "0", "string value") + singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") + doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") + mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") + mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") + nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") + nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") + float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") + durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") + extra := "one-extra-argument" + args := []string{ + "-bool", + "-bool2=true", + "--int", "22", + "--int64", "0x23", + "-uint", "24", + "--uint64", "25", + "-string", "hello", + "-squote='single'", + `-dquote="double"`, + `-mquote='mixed"`, + `-mquote2="mixed2'`, + `-nquote="'single nested'"`, + `-nquote2='"double nested"'`, + "-float64", "2718e28", + "-duration", "2m", + extra, + } + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag != true { + t.Error("bool flag should be true, is ", *boolFlag) + } + if *bool2Flag != true { + t.Error("bool2 flag should be true, is ", *bool2Flag) + 
} + if *intFlag != 22 { + t.Error("int flag should be 22, is ", *intFlag) + } + if *int64Flag != 0x23 { + t.Error("int64 flag should be 0x23, is ", *int64Flag) + } + if *uintFlag != 24 { + t.Error("uint flag should be 24, is ", *uintFlag) + } + if *uint64Flag != 25 { + t.Error("uint64 flag should be 25, is ", *uint64Flag) + } + if *stringFlag != "hello" { + t.Error("string flag should be `hello`, is ", *stringFlag) + } + if *singleQuoteFlag != "single" { + t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) + } + if *doubleQuoteFlag != "double" { + t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) + } + if *mixedQuoteFlag != `'mixed"` { + t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) + } + if *mixed2QuoteFlag != `"mixed2'` { + t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) + } + if *nestedQuoteFlag != "'single nested'" { + t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) + } + if *nested2QuoteFlag != `"double nested"` { + t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) + } + if *float64Flag != 2718e28 { + t.Error("float64 flag should be 2718e28, is ", *float64Flag) + } + if *durationFlag != 2*time.Minute { + t.Error("duration flag should be 2m, is ", *durationFlag) + } + if len(f.Args()) != 1 { + t.Error("expected one argument, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } +} + +func testPanic(f *FlagSet, t *testing.T) { + f.Int([]string{"-int"}, 0, "int value") + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + args := []string{ + "-int", "21", + } + f.Parse(args) +} + +func TestParsePanic(t *testing.T) { + ResetForTesting(func() {}) + testPanic(CommandLine, t) +} + +func TestParse(t *testing.T) { + ResetForTesting(func() { t.Error("bad parse") }) + testParse(CommandLine, t) +} + +func TestFlagSetParse(t *testing.T) { + testParse(NewFlagSet("test", ContinueOnError), t) +} + +// Declare a user-defined flag type. +type flagVar []string + +func (f *flagVar) String() string { + return fmt.Sprint([]string(*f)) +} + +func (f *flagVar) Set(value string) error { + *f = append(*f, value) + return nil +} + +func TestUserDefined(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var v flagVar + flags.Var(&v, []string{"v"}, "usage") + if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { + t.Error(err) + } + if len(v) != 3 { + t.Fatal("expected 3 args; got ", len(v)) + } + expect := "[1 2 3]" + if v.String() != expect { + t.Errorf("expected value %q got %q", expect, v.String()) + } +} + +// Declare a user-defined boolean flag type. 
+type boolFlagVar struct { + count int +} + +func (b *boolFlagVar) String() string { + return fmt.Sprintf("%d", b.count) +} + +func (b *boolFlagVar) Set(value string) error { + if value == "true" { + b.count++ + } + return nil +} + +func (b *boolFlagVar) IsBoolFlag() bool { + return b.count < 4 +} + +func TestUserDefinedBool(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var b boolFlagVar + var err error + flags.Var(&b, []string{"b"}, "usage") + if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { + if b.count < 4 { + t.Error(err) + } + } + + if b.count != 4 { + t.Errorf("want: %d; got: %d", 4, b.count) + } + + if err == nil { + t.Error("expected error; got none") + } +} + +func TestSetOutput(t *testing.T) { + var flags FlagSet + var buf bytes.Buffer + flags.SetOutput(&buf) + flags.Init("test", ContinueOnError) + flags.Parse([]string{"-unknown"}) + if out := buf.String(); !strings.Contains(out, "-unknown") { + t.Logf("expected output mentioning unknown; got %q", out) + } +} + +// This tests that one can reset the flags. This still works but not well, and is +// superseded by FlagSet. +func TestChangingArgs(t *testing.T) { + ResetForTesting(func() { t.Fatal("bad parse") }) + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} + before := Bool([]string{"before"}, false, "") + if err := CommandLine.Parse(os.Args[1:]); err != nil { + t.Fatal(err) + } + cmd := Arg(0) + os.Args = Args() + after := Bool([]string{"after"}, false, "") + Parse() + args := Args() + + if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { + t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) + } +} + +// Test that -help invokes the usage message and returns ErrHelp. +func TestHelp(t *testing.T) { + var helpCalled = false + fs := NewFlagSet("help test", ContinueOnError) + fs.Usage = func() { helpCalled = true } + var flag bool + fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") + // Regular flag invocation should work + err := fs.Parse([]string{"-flag=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if !flag { + t.Error("flag was not set by -flag") + } + if helpCalled { + t.Error("help called for regular flag") + helpCalled = false // reset for next test + } + // Help flag should work as expected. + err = fs.Parse([]string{"-help"}) + if err == nil { + t.Fatal("error expected") + } + if err != ErrHelp { + t.Fatal("expected ErrHelp; got ", err) + } + if !helpCalled { + t.Fatal("help was not called") + } + // If we define a help flag, that should override. + var help bool + fs.BoolVar(&help, []string{"help"}, false, "help flag") + helpCalled = false + err = fs.Parse([]string{"-help"}) + if err != nil { + t.Fatal("expected no error for defined -help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} + +// Test the flag count functions. 
+func TestFlagCounts(t *testing.T) { + fs := NewFlagSet("help test", ContinueOnError) + var flag bool + fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag") + fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag") + fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag") + fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag") + fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag") + fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag") + + if fs.FlagCount() != 6 { + t.Fatal("FlagCount wrong. ", fs.FlagCount()) + } + if fs.FlagCountUndeprecated() != 4 { + t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated()) + } + if fs.NFlag() != 0 { + t.Fatal("NFlag wrong. ", fs.NFlag()) + } + err := fs.Parse([]string{"-fd", "-g", "-flag4"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if fs.NFlag() != 4 { + t.Fatal("NFlag wrong. ", fs.NFlag()) + } +} + +// Surface a bug in sortFlags +func TestSortFlags(t *testing.T) { + fs := NewFlagSet("help TestSortFlags", ContinueOnError) + + var err error + + var b bool + fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage") + + err = fs.Parse([]string{"--banana=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + count := 0 + + fs.VisitAll(func(flag *Flag) { + count++ + if flag == nil { + t.Fatal("VisitAll should not return a nil flag") + } + }) + flagcount := fs.FlagCount() + if flagcount != count { + t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count) + } + // Make sure it's idempotent + if flagcount != fs.FlagCount() { + t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount()) + } + + count = 0 + fs.Visit(func(flag *Flag) { + count++ + if flag == nil { + t.Fatal("Visit should not return a nil flag") + } + }) + nflag := fs.NFlag() + if nflag != count { + t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count) + } + if nflag != fs.NFlag() { + t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag()) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS new file mode 100644 index 0000000000..1e998f8ac1 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/MAINTAINERS @@ -0,0 +1 @@ +Michael Crosby (@crosbymichael) diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/flags.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/flags.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/flags_freebsd.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/flags_freebsd.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/flags_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/flags_linux.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go diff --git
a/tests/_vendor/src/github.com/docker/docker/pkg/mount/flags_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/flags_unsupported.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/mount.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/mount.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go new file mode 100644 index 0000000000..5c7f1b86a0 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go @@ -0,0 +1,137 @@ +package mount + +import ( + "os" + "path" + "testing" +) + +func TestMountOptionsParsing(t *testing.T) { + options := "noatime,ro,size=10k" + + flag, data := parseOptions(options) + + if data != "size=10k" { + t.Fatalf("Expected size=10k, got %s", data) + } + + expectedFlag := NOATIME | RDONLY + + if flag != expectedFlag { + t.Fatalf("Expected %d, got %d", expectedFlag, flag) + } +} + +func TestMounted(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + mounted, err := Mounted(targetDir) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatalf("Expected %s to be mounted", targetDir) + } + if _, err := os.Stat(targetDir); err != nil { + t.Fatal(err) + } +} + +func TestMountReadonly(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) + if err == nil { + t.Fatal("Should not be able to open a ro file as rw") + } +} + +func TestGetMounts(t *testing.T) { + mounts, err := GetMounts() + if err != nil { + t.Fatal(err) + } + + root := false + for _, entry := range mounts { + if entry.Mountpoint == "/" { +
root = true + } + } + + if !root { + t.Fatal("/ should be mounted at least") + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/mounter_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/mounter_linux.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/mountinfo.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/mountinfo.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go new file mode 100644 index 0000000000..3c214476df --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go @@ -0,0 +1,448 @@ +// +build linux + +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs 
rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / 
/var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - 
ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / 
/sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs 
none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / 
/var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / 
/var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / 
/var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children +27 24 0:20 / 
/sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - 
aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c 
+76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / 
/var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / 
/var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / 
/var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c +169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / 
/var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / 
/var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / 
/var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 0:3634 / 
/var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/MAINTAINERS new file mode 100644 index 0000000000..8c8902530a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/MAINTAINERS @@ -0,0 +1 @@ +Erik Hollensbe (@erikh) diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/parsers/filters/parse.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/parsers/filters/parse.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go new file mode 100644 index 0000000000..a248350223 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go @@ -0,0 +1,78 @@ +package filters + +import ( + "sort" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = Args{} + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args["created"]) != 1 { + t.Errorf("failed to set this arg") + } + if len(args["image.name"]) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParam(t *testing.T) { + a := Args{ + "created": []string{"today"}, + "image.name": []string{"ubuntu*", "*untu"}, + } + + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + for key, vals := range v1 { + if _, ok := a[key]; !ok { + t.Errorf("could not find key %s in original set", key) + } + sort.Strings(vals) + sort.Strings(a[key]) + if len(vals) != len(a[key]) { + t.Errorf("value lengths ought to match") + continue + } + for i := range vals { + if vals[i] != a[key][i] { + t.Errorf("expected %s, but got %s", a[key][i], vals[i]) + } + } + } +} 
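+
+// Illustrative sketch, not part of the vendored file: the ToParam/FromParam
+// round trip exercised by TestParam above is how a caller would serialize a
+// filter set for transport and recover it on the other side. Assuming the
+// vendored import path "github.com/docker/docker/pkg/parsers/filters":
+//
+//	args := filters.Args{"status": []string{"exited"}}
+//	param, err := filters.ToParam(args) // encode the Args map to a string
+//	if err != nil {
+//		// handle encoding failure
+//	}
+//	decoded, err := filters.FromParam(param) // recover the original Args
+//	if err != nil {
+//		// handle decoding failure
+//	}
+//	_ = decoded["status"] // []string{"exited"}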
+ +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if len(a) != len(v1) { + t.Errorf("these should both be empty sets") + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go new file mode 100644 index 0000000000..70d09003a3 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -0,0 +1,93 @@ +package kernel + +import ( + "bytes" + "errors" + "fmt" +) + +type KernelVersionInfo struct { + Kernel int + Major int + Minor int + Flavor string +} + +func (k *KernelVersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// Compare two KernelVersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b *KernelVersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +func GetKernelVersion() (*KernelVersionInfo, error) { + var ( + err error + ) + + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +func ParseRelease(release string) (*KernelVersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &KernelVersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go new file mode 100644 index 0000000000..e211a63b7d --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go @@ -0,0 +1,61 @@ +package kernel + +import ( + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { + var ( + a *KernelVersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor.
Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) +} + +func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, + &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 0000000000..8ca814c1fb --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,16 @@ +package kernel + +import ( + "syscall" +) + +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 0000000000..00c5422589 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package kernel + +import ( + "errors" +) + +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem.go new file mode 100644 index 0000000000..af185f9f6b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem.go @@ -0,0 +1,40 @@ +package operatingsystem + +import ( + "bytes" + "errors" + 
"io/ioutil" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" +) + +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 { + b = b[i+13:] + return string(b[:bytes.IndexByte(b, '"')]), nil + } + return "", errors.New("PRETTY_NAME not found") +} + +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) { + return true, nil + } + } + return false, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go new file mode 100644 index 0000000000..d264b35f03 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go @@ -0,0 +1,123 @@ +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var ( + backup = etcOsRelease + ubuntuTrusty = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + gentoo = []byte(`NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`) + noPrettyName = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + ) + + dir := os.TempDir() + defer func() { + etcOsRelease = backup + os.RemoveAll(dir) + }() + + etcOsRelease = filepath.Join(dir, "etcOsRelease") + for expect, osRelease := range map[string][]byte{ + "Ubuntu 14.04 LTS": ubuntuTrusty, + "Gentoo/Linux": gentoo, + "": noPrettyName, + } { + if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if s != expect { + if expect == "" { + t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err) + } else { + t.Fatalf("Expected '%s', but got '%s'. 
Err=%v", expect, s, err) + } + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + defer func() { + proc1Cgroup = backup + os.RemoveAll(dir) + }() + + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/parsers/parsers.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/parsers/parsers.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go new file mode 100644 index 0000000000..12b8df5708 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/parsers_test.go @@ -0,0 +1,83 @@ +package parsers + +import ( + "testing" +) + +func TestParseHost(t *testing.T) { + var ( + defaultHttpHost = "127.0.0.1" + defaultUnix = "/var/run/docker.sock" + ) + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil { + t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil { + t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { + t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { + t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { + t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, 
defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { + t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { + t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) + } + if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:2375"); err == nil { + t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) + } +} + +func TestParseRepositoryTag(t *testing.T) { + if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) + } +} + +func TestParsePortMapping(t *testing.T) { + data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") + if err != nil { + t.Fatal(err) + } + + if len(data) != 3 { + t.FailNow() + } + if data["ip"] != "192.168.1.1" { + t.Fail() + } + if data["public"] != "80" { + t.Fail() + } + if data["private"] != "8080" { + t.Fail() + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/pools/pools.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/pools/pools.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go new file mode 100644 index 0000000000..48903c2396 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go @@ -0,0 +1,73 @@ +// +build !go1.3 + +package pools + +import ( + "bufio" + "io" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + BufioReader32KPool *BufioReaderPool + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +type BufioReaderPool struct { + size int +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + 
BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{size: size} +} + +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + return bufio.NewReaderSize(r, bufPool.size) +} + +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) +} + +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + return readCloser.Close() + } + return nil + }) +} + +type BufioWriterPool struct { + size int +} + +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{size: size} +} + +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + return bufio.NewWriterSize(w, bufPool.size) +} + +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) +} + +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + return writeCloser.Close() + } + return nil + }) +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/promise/promise.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/promise/promise.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/signal/signal.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/signal/signal.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/signal/signal_darwin.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_darwin.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/signal/signal_darwin.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_darwin.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/signal/signal_freebsd.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_freebsd.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/signal/signal_freebsd.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_freebsd.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/signal/signal_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_linux.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/signal/signal_linux.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_linux.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/signal/signal_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_unsupported.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/signal/signal_unsupported.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_unsupported.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/trap.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/trap.go 
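A usage sketch for the pools package: the !go1.3 fallback above just allocates, while the renamed pools.go (content not shown in this patch) presumably backs the same Get/Put API with sync.Pool on newer Go:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	// Borrow a 32K buffered reader for an arbitrary io.Reader.
	br := pools.BufioReader32KPool.Get(strings.NewReader("hello"))
	word, _ := br.ReadString('\n') // io.EOF is fine for this sketch
	fmt.Println(word)
	// Put resets the reader so a pooled buffer never pins its source.
	pools.BufioReader32KPool.Put(br)
}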
new file mode 100644 index 0000000000..cbdfd1ff17 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/trap.go @@ -0,0 +1,54 @@ +package signal + +import ( + "log" + "os" + gosignal "os/signal" + "sync/atomic" + "syscall" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is +// skipped and the process terminated directly. +// * If "DEBUG" is set in the environment, SIGQUIT causes an exit without cleanup. +// +func Trap(cleanup func()) { + c := make(chan os.Signal, 1) + signals := []os.Signal{os.Interrupt, syscall.SIGTERM} + if os.Getenv("DEBUG") == "" { + signals = append(signals, syscall.SIGQUIT) + } + gosignal.Notify(c, signals...) + go func() { + interruptCount := uint32(0) + for sig := range c { + go func(sig os.Signal) { + log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + // If the user really wants to interrupt, let him do so. + if atomic.LoadUint32(&interruptCount) < 3 { + atomic.AddUint32(&interruptCount, 1) + // Initiate the cleanup only once + if atomic.LoadUint32(&interruptCount) == 1 { + // Call cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + log.Printf("Force shutdown of docker, interrupting cleanup\n") + } + case syscall.SIGQUIT: + } + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/MAINTAINERS new file mode 100644 index 0000000000..6dde4769d7 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/MAINTAINERS @@ -0,0 +1 @@ +Cristian Staretu (@unclejack) diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go new file mode 100644 index 0000000000..14e6ed3115 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go @@ -0,0 +1,20 @@ +package stdcopy + +import ( + "bytes" + "io/ioutil" + "testing" +) + +func BenchmarkWrite(b *testing.B) { + w := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test line for testing stdwriter performance\n") + data = bytes.Repeat(data, 100) + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := w.Write(data); err != nil { + b.Fatal(err) + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/sysinfo/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/sysinfo/MAINTAINERS new file mode 100644 index 0000000000..68a97d2fc2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/sysinfo/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git 
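A sketch of how a daemon would install the trap defined above; the cleanup func runs once on the first SIGINT/SIGTERM, and three rapid interrupts force an immediate exit instead:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	signal.Trap(func() {
		// Called at most once; os.Exit(0) follows when it returns.
		fmt.Println("flushing state before exit")
	})
	time.Sleep(time.Hour) // stand-in for the daemon's real work
}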
a/tests/_vendor/src/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/sysinfo/sysinfo.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/sysinfo/sysinfo.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/sysinfo/sysinfo.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS new file mode 100644 index 0000000000..68a97d2fc2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/system/errors.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/system/errors.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/system/stat_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/system/stat_linux.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/system/stat_unsupported.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/system/utimes_darwin.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/system/utimes_darwin.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/system/utimes_freebsd.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/system/utimes_freebsd.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/system/utimes_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/system/utimes_linux.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go new file mode 100644 index 0000000000..38e4020cb5 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go @@ -0,0 +1,64 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +func prepareFiles(t *testing.T) (string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); 
err != nil { + t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink := prepareFiles(t) + + before, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + ts := []syscall.Timespec{{0, 0}, {0, 0}} + if err := LUtimesNano(symlink, ts); err != nil { + t.Fatal(err) + } + + symlinkInfo, err := os.Lstat(symlink) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { + t.Fatal("The modification time of the symlink should be different") + } + + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() != fileInfo.ModTime().Unix() { + t.Fatal("The modification time of the file should be the same") + } + + if err := LUtimesNano(invalid, ts); err == nil { + t.Fatal("Doesn't return an error on a non-existing file") + } +}
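A minimal sketch of the system.LUtimesNano call exercised by the test above (Linux-only in this vendored tree; the symlink path is hypothetical):

package main

import (
	"fmt"
	"syscall"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Set atime and mtime of the link itself (not its target) to the epoch.
	ts := []syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}}
	if err := system.LUtimesNano("/tmp/some-symlink", ts); err != nil {
		fmt.Println("utimes failed:", err)
	}
}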
diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/system/utimes_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/system/utimes_unsupported.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/system/xattrs_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/system/xattrs_linux.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/MAINTAINER b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/MAINTAINER new file mode 100644 index 0000000000..bd492e8394 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/MAINTAINER @@ -0,0 +1 @@ +Eric Windisch (@ewindisch) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go new file mode 100644 index 0000000000..f9f468098c --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -0,0 +1,125 @@ +package tarsum + +import "sort" + +// This info will be accessed through an interface so the actual name and sum cannot be meddled with +type FileInfoSumInterface interface { + // File name + Name() string + // Checksum of this particular file and its headers + Sum() string + // Position of file in the tar + Pos() int64 +} + +type fileInfoSum struct { + name string + sum string + pos int64 +} + +func (fis fileInfoSum) Name() string { + return fis.name +} +func (fis fileInfoSum) Sum() string { + return fis.sum +} +func (fis fileInfoSum) Pos() int64 { + return fis.pos +} + +type FileInfoSums []FileInfoSumInterface + +// GetFile returns the first FileInfoSumInterface with a matching name +func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + for i := range fis { + if fis[i].Name() == name { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names +func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map. + for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +func (fis FileInfoSums) Len() int { return len(fis) } +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. +// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting the fileinfos by their original position in the tar archive +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +}
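A test-file style sketch of the duplicate-path rule in SortBySums above; it must live in package tarsum since the concrete fileInfoSum type is unexported. Colliding names fall back to tar position, so duplicates cannot be reordered to forge a checksum:

package tarsum

import "fmt"

func ExampleFileInfoSums_SortBySums() {
	fis := FileInfoSums{
		fileInfoSum{name: "dup", sum: "bbb", pos: 1},
		fileInfoSum{name: "dup", sum: "aaa", pos: 0},
	}
	// Equal names sort by original position, not by sum.
	fis.SortBySums()
	fmt.Println(fis[0].Sum(), fis[1].Sum())
	// Output: aaa bbb
}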
diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 0000000000..e1c6cc1238 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,45 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) + } + + fis := newFileInfoSums() + expected := "0abcdef1234567890" + fis.SortBySums() + got := fis[0].Sum() + if got != expected { + t.Errorf("Expected %q, got %q", expected, got) + } + + fis = newFileInfoSums() + expected = "dup1" + fis.SortByNames() + gotFis := fis[0] + if gotFis.Name() != expected { + t.Errorf("Expected %q, got %q", expected, gotFis.Name()) + } + // since a duplicate is first, ensure it is ordered first by position too + if gotFis.Pos() != 4 { + t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go new file mode 100644 index 0000000000..6581f3f234 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -0,0 +1,285 @@ +package tarsum + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "hash" + "io" + "sort" + "strconv" + "strings" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + + "github.com/docker/docker/pkg/log" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed-time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + if _, ok := tarSumVersions[v]; !ok { + return nil, ErrVersionNotImplemented + } + return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v}, nil +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than the DefaultTHash +func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + if _, ok := tarSumVersions[v]; !ok { + return nil, ErrVersionNotImplemented + } + return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, tHash: tHash}, nil +} + +// TarSum is the generic interface for calculating fixed-time +// checksums of a tar archive +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output is gzip compressed.
+ tarSumVersion Version // this field is not exported so it can not be mutated during use +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// A hash.Hash type generator and its name +type THash interface { + Hash() hash.Hash + Name() string +} + +// Convenience method for creating a THash +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +// TarSum default is "sha256" +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts tarSum) selectHeaders(h *tar.Header, v Version) (set [][2]string) { + for _, elem := range [][2]string{ + {"name", h.Name}, + {"mode", strconv.Itoa(int(h.Mode))}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.Itoa(int(h.Size))}, + {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.Itoa(int(h.Devmajor))}, + {"devminor", strconv.Itoa(int(h.Devminor))}, + } { + if v >= VersionDev && elem[0] == "mtime" { + continue + } + set = append(set, elem) + } + return +} + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.selectHeaders(h, ts.Version()) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + + // include the additional pax headers, from an ordered list + if ts.Version() >= VersionDev { + var keys []string + for k := range h.Xattrs { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + if _, err := ts.h.Write([]byte(k + h.Xattrs[k])); err != nil { + return err + } + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.writer == nil { + if err := ts.initTarSum(); err != nil { + return 0, err + } + } + + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + 
return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + log.Debugf("-->%s<--", fis.Sum()) + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + log.Debugf("checksum processed: %s", checksum) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +}
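Putting the pieces above together, a minimal sketch that checksums a layer tarball (the filename is hypothetical); note the sums are only complete once the TarSum reader has been fully drained:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	fh, err := os.Open("layer.tar") // hypothetical layer archive
	if err != nil {
		panic(err)
	}
	defer fh.Close()
	// The second argument disables gzip compression of the pass-through copy.
	ts, err := tarsum.NewTarSum(fh, true, tarsum.Version0)
	if err != nil {
		panic(err)
	}
	// Drain the reader; hashing happens as a side effect of Read.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Sum(nil)) // e.g. "tarsum+sha256:..."
}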
"testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6235cd3a2afb7501bac541772a3d61a3634e95bc90bb39a4676e2cb98d08390d"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:ac672ee85da9ab7f9667ae3c32841d3e42f33cc52c273c23341dabba1c8b0c8b"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. ensuring is has different hash than above + filename: "testdata/collision/collision-1.tar", + tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + { + // this tar has newer of collider-0.tar, ensuring is has different hash + filename: "testdata/collision/collision-2.tar", + tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + { + // this tar has newer of collider-1.tar, ensuring is has different hash + filename: "testdata/collision/collision-3.tar", + tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", + hash: md5THash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", + hash: sha1Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", + hash: sha224Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", + hash: sha384Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", + hash: sha512Hash, + }, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 
0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func emptyTarSum(gzip bool) (TarSum, error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. + go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! 
+ ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + } +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(buf, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(buf, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json 
b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 0000000000..0f0ba4974d --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000..dfd5c204ae Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json new file mode 100644 index 0000000000..12c18a076f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json @@ -0,0 +1 @@ +{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0} \ No newline at end of file diff --git 
a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar new file mode 100644 index 0000000000..880b3f2c56 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar new file mode 100644 index 0000000000..1c636b3bc7 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar new file mode 100644 index 0000000000..b411be9785 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 0000000000..7b5c04a964 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar new file mode 100644 index 0000000000..f8c64586d2 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json new file mode 100644 index 0000000000..328ea31fd9 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json @@ -0,0 +1 @@ +{"id":"4439c3c7f847954100b42b267e7e5529cac1d6934db082f65795c5ca2e594d93","parent":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","created":"2014-05-16T17:19:44.091534414Z","container":"5f92fb06cc58f357f0cde41394e2bbbb664e663974b2ac1693ab07b7a306749b","container_config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","setcap 'cap_setgid,cap_setuid+ep' ./file \u0026\u0026 getcap 
./file"],"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.11.1-dev","config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":0} \ No newline at end of file diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar new file mode 100644 index 0000000000..819351d42f Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go new file mode 100644 index 0000000000..e1161fc5ab --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go @@ -0,0 +1,56 @@ +package tarsum + +import ( + "errors" + "strings" +) + +// versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +const ( + // Prefix of "tarsum" + Version0 Version = iota + // Prefix of "tarsum.dev" + // NOTE: this variable will be of an unsettled next-version of the TarSum calculation + VersionDev +) + +// Get a list of all known tarsum Version +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var tarSumVersions = map[Version]string{ + 0: "tarsum", + 1: "tarsum.dev", +} + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go new file mode 100644 index 0000000000..b851c3be6f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go @@ -0,0 +1,49 @@ +package tarsum + +import ( + "testing" +) + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct { + Str string + Expected Version + }{ + 
{"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go new file mode 100644 index 0000000000..9727ecde3e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/MAINTAINERS new file mode 100644 index 0000000000..aee10c8421 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/MAINTAINERS @@ -0,0 +1 @@ +Solomon Hykes (@shykes) diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/term/term.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/term/term.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/term/termios_darwin.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/term/termios_darwin.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/term/termios_freebsd.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/term/termios_freebsd.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/term/termios_linux.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/term/termios_linux.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/MAINTAINERS new file mode 100644 index 0000000000..6dde4769d7 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/MAINTAINERS @@ -0,0 +1 @@ +Cristian Staretu (@unclejack) diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/timeutils/json.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go 
similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/timeutils/json.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/truncindex/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/truncindex/MAINTAINERS new file mode 100644 index 0000000000..6dde4769d7 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/truncindex/MAINTAINERS @@ -0,0 +1 @@ +Cristian Staretu (@unclejack) diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/truncindex/truncindex.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/truncindex/truncindex.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/truncindex/truncindex.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/truncindex/truncindex.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/truncindex/truncindex_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/truncindex/truncindex_test.go new file mode 100644 index 0000000000..32c41c7d76 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/truncindex/truncindex_test.go @@ -0,0 +1,401 @@ +package truncindex + +import ( + "math/rand" + "testing" + + "github.com/docker/docker/utils" +) + +// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. +func TestTruncIndex(t *testing.T) { + ids := []string{} + index := NewTruncIndex(ids) + // Get on an empty index + if _, err := index.Get("foobar"); err == nil { + t.Fatal("Get on an empty index should return an error") + } + + // Spaces should be illegal in an id + if err := index.Add("I have a space"); err == nil { + t.Fatalf("Adding an id with ' ' should return an error") + } + + id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" + // Add an id + if err := index.Add(id); err != nil { + t.Fatal(err) + } + + // Add an empty id (should fail) + if err := index.Add(""); err == nil { + t.Fatalf("Adding an empty id should return an error") + } + + // Get a non-existing id + assertIndexGet(t, index, "abracadabra", "", true) + // Get an empty id + assertIndexGet(t, index, "", "", true) + // Get the exact id + assertIndexGet(t, index, id, id, false) + // The first letter should match + assertIndexGet(t, index, id[:1], id, false) + // The first half should match + assertIndexGet(t, index, id[:len(id)/2], id, false) + // The second half should NOT match + assertIndexGet(t, index, id[len(id)/2:], "", true) + + id2 := id[:6] + "blabla" + // Add an id + if err := index.Add(id2); err != nil { + t.Fatal(err) + } + // Both exact IDs should work + assertIndexGet(t, index, id, id, false) + assertIndexGet(t, index, id2, id2, false) + + // 6 characters or less should conflict + assertIndexGet(t, index, id[:6], "", true) + assertIndexGet(t, index, id[:4], "", true) + assertIndexGet(t, index, id[:1], "", true) + + // 7 characters should NOT conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id2[:7], id2, false) + + // Deleting a non-existing id should return an error + if err := index.Delete("non-existing"); err == nil { + t.Fatalf("Deleting a non-existing id should return an error") + } + + // Deleting an empty id should return an error + if err := index.Delete(""); err == nil { + t.Fatal("Deleting an empty id should return an error") + } + + // Deleting id2 should remove conflicts + if err := index.Delete(id2); err != nil { + 
t.Fatal(err) + } + // id2 should no longer work + assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func BenchmarkTruncIndexAdd100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() 
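The TruncIndex contract pinned down by the test above is simple: a prefix resolves only while it is unambiguous, and deleting an entry can disambiguate prefixes that previously conflicted. A short illustrative sketch against the vendored package (IDs are made up):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	index := truncindex.NewTruncIndex([]string{
		"99b36c2c326ccc11e726eee6ee78a0baf166ef96",
		"99b36cffee00112233445566778899aabbccddee",
	})

	// Unambiguous prefix: resolves to the full ID.
	id, err := index.Get("99b36c2")
	fmt.Println(id, err) // first ID, <nil>

	// Prefix shared by both IDs: conflicts, so Get returns an error.
	if _, err := index.Get("99b36c"); err != nil {
		fmt.Println("ambiguous prefix:", err)
	}
}
```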
+ for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexDelete100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexNew100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, utils.GenerateRandomID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexAddGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := utils.GenerateRandomID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := utils.GenerateRandomID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := utils.GenerateRandomID() + 
testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS new file mode 100644 index 0000000000..68a97d2fc2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Victor Vieux (@vieux) diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/units/duration.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/units/duration.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go new file mode 100644 index 0000000000..a22947402b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go @@ -0,0 +1,46 @@ +package units + +import ( + "testing" + "time" +) + +func TestHumanDuration(t *testing.T) { + // Useful duration abstractions + day := 24 * time.Hour + week := 7 * day + month := 30 * day + year := 365 * day + + assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) + assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) + assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) + assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) + assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) + assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) + assertEquals(t, "2 days", HumanDuration(2*day)) + assertEquals(t, "7 days", HumanDuration(7*day)) + assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) + assertEquals(t, "2 weeks", HumanDuration(2*week)) + assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) + assertEquals(t, "3 weeks", HumanDuration(3*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) + assertEquals(t, "4 weeks", HumanDuration(1*month)) + assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) + assertEquals(t, "8 weeks", HumanDuration(2*month)) + assertEquals(t, "3 months", HumanDuration(3*month+1*week)) + assertEquals(t, "5 months", HumanDuration(5*month+2*week)) + assertEquals(t, "13 months", HumanDuration(13*month)) + assertEquals(t, "23 months", HumanDuration(23*month)) + assertEquals(t, "24 months", HumanDuration(24*month)) + assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week)) + assertEquals(t, "3.164384 years", 
HumanDuration(3*year+2*month)) +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/units/size.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/units/size.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go new file mode 100644 index 0000000000..8dae7e716b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go @@ -0,0 +1,98 @@ +package units + +import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func TestHumanSize(t *testing.T) { + assertEquals(t, "1 kB", HumanSize(1000)) + assertEquals(t, "1.024 kB", HumanSize(1024)) + assertEquals(t, "1 MB", HumanSize(1000000)) + assertEquals(t, "1.049 MB", HumanSize(1048576)) + assertEquals(t, "2 MB", HumanSize(2*MB)) + assertEquals(t, "3.42 GB", HumanSize(3.42*GB)) + assertEquals(t, "5.372 TB", HumanSize(5.372*TB)) + assertEquals(t, "2.22 PB", HumanSize(2.22*PB)) +} + +func TestFromHumanSize(t *testing.T) { + assertSuccessEquals(t, 32, FromHumanSize, "32") + assertSuccessEquals(t, 32, FromHumanSize, "32b") + assertSuccessEquals(t, 32, FromHumanSize, "32B") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") + assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") + assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") + assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") + assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") + + assertError(t, FromHumanSize, "") + assertError(t, FromHumanSize, "hello") + assertError(t, FromHumanSize, "-32") + assertError(t, FromHumanSize, "32.3") + assertError(t, FromHumanSize, " 32 ") + assertError(t, FromHumanSize, "32.3Kb") + assertError(t, FromHumanSize, "32 mb") + assertError(t, FromHumanSize, "32m b") + assertError(t, FromHumanSize, "32bm") +} + +func TestRAMInBytes(t *testing.T) { + assertSuccessEquals(t, 32, RAMInBytes, "32") + assertSuccessEquals(t, 32, RAMInBytes, "32b") + assertSuccessEquals(t, 32, RAMInBytes, "32B") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") + assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") + assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") + assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") + + assertError(t, RAMInBytes, "") + assertError(t, RAMInBytes, "hello") + assertError(t, RAMInBytes, "-32") + assertError(t, RAMInBytes, "32.3") + assertError(t, RAMInBytes, " 32 ") + assertError(t, RAMInBytes, "32.3Kb") + assertError(t, RAMInBytes, "32 mb") + assertError(t, RAMInBytes, "32m b") + assertError(t, RAMInBytes, "32bm") +} + +func assertEquals(t *testing.T, expected, actual interface{}) { + if expected != actual { + t.Errorf("Expected '%v' but got '%v'", expected, actual) + } +} + +// func that maps to the parse function signatures as testing abstraction +type parseFn func(string) (int64, error) + +// Define 'String()' for pretty-print +func 
(fn parseFn) String() string { + fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() + return fnName[strings.LastIndex(fnName, ".")+1:] +} + +func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { + res, err := fn(arg) + if err != nil || res != expected { + t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) + } +} + +func assertError(t *testing.T, fn parseFn, arg string) { + res, err := fn(arg) + if err == nil && res != -1 { + t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/pkg/version/version.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/pkg/version/version.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go new file mode 100644 index 0000000000..c02ec40fcb --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go @@ -0,0 +1,27 @@ +package version + +import ( + "testing" +) + +func assertVersion(t *testing.T, a, b string, result int) { + if r := Version(a).compareTo(Version(b)); r != result { + t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) + +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/MAINTAINERS new file mode 100644 index 0000000000..fdb03ed573 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/MAINTAINERS @@ -0,0 +1,5 @@ +Sam Alba (@samalba) +Joffrey Fuhrer (@shin-) +Ken Cochrane (@kencochrane) +Vincent Batts (@vbatts) +Olivier Gambier (@dmp42) diff --git a/tests/_vendor/src/github.com/docker/docker/registry/auth.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/auth.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/registry/auth.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/registry/auth.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go new file mode 100644 index 0000000000..3cb1a9ac4b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/auth_test.go @@ -0,0 +1,149 @@ +package registry + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestEncodeAuth(t *testing.T) { + newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} + authStr := encodeAuth(newAuthConfig) + decAuthConfig := &AuthConfig{} + var err error + decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) + if err != nil { + t.Fatal(err) 
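The literal "a2VuOnRlc3Q=" pinned by an assertion just below is simply base64 of "ken:test"; the encode/decode pair under test presumably round-trips "username:password" through standard base64 (auth.go itself is only renamed in this diff, so this is inference, not its verbatim implementation):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Credentials serialized as "username:password", then base64-encoded.
	enc := base64.StdEncoding.EncodeToString([]byte("ken:test"))
	fmt.Println(enc)                   // a2VuOnRlc3Q=
	fmt.Println(enc == "a2VuOnRlc3Q=") // true
}
```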
+ } + if newAuthConfig.Username != decAuthConfig.Username { + t.Fatal("Encode Username doesn't match decoded Username") + } + if newAuthConfig.Password != decAuthConfig.Password { + t.Fatal("Encode Password doesn't match decoded Password") + } + if authStr != "a2VuOnRlc3Q=" { + t.Fatal("AuthString encoding isn't correct.") + } +} + +func setupTempConfigFile() (*ConfigFile, error) { + root, err := ioutil.TempDir("", "docker-test-auth") + if err != nil { + return nil, err + } + configFile := &ConfigFile{ + rootPath: root, + Configs: make(map[string]AuthConfig), + } + + for _, registry := range []string{"testIndex", IndexServerAddress()} { + configFile.Configs[registry] = AuthConfig{ + Username: "docker-user", + Password: "docker-pass", + Email: "docker@docker.io", + } + } + + return configFile, nil +} + +func TestSameAuthDataPostSave(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + err = SaveConfig(configFile) + if err != nil { + t.Fatal(err) + } + + authConfig := configFile.Configs["testIndex"] + if authConfig.Username != "docker-user" { + t.Fail() + } + if authConfig.Password != "docker-pass" { + t.Fail() + } + if authConfig.Email != "docker@docker.io" { + t.Fail() + } + if authConfig.Auth != "" { + t.Fail() + } +} + +func TestResolveAuthConfigIndexServer(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + for _, registry := range []string{"", IndexServerAddress()} { + resolved := configFile.ResolveAuthConfig(registry) + if resolved != configFile.Configs[IndexServerAddress()] { + t.Fail() + } + } +} + +func TestResolveAuthConfigFullURL(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + registryAuth := AuthConfig{ + Username: "foo-user", + Password: "foo-pass", + Email: "foo@example.com", + } + localAuth := AuthConfig{ + Username: "bar-user", + Password: "bar-pass", + Email: "bar@example.com", + } + configFile.Configs["https://registry.example.com/v1/"] = registryAuth + configFile.Configs["http://localhost:8000/v1/"] = localAuth + configFile.Configs["registry.com"] = registryAuth + + validRegistries := map[string][]string{ + "https://registry.example.com/v1/": { + "https://registry.example.com/v1/", + "http://registry.example.com/v1/", + "registry.example.com", + "registry.example.com/v1/", + }, + "http://localhost:8000/v1/": { + "https://localhost:8000/v1/", + "http://localhost:8000/v1/", + "localhost:8000", + "localhost:8000/v1/", + }, + "registry.com": { + "https://registry.com/v1/", + "http://registry.com/v1/", + "registry.com", + "registry.com/v1/", + }, + } + + for configKey, registries := range validRegistries { + for _, registry := range registries { + var ( + configured AuthConfig + ok bool + ) + resolved := configFile.ResolveAuthConfig(registry) + if configured, ok = configFile.Configs[configKey]; !ok { + t.Fail() + } + if resolved.Email != configured.Email { + t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) + } + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go new file mode 100644 index 0000000000..5313a8079f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/endpoint.go @@ -0,0 +1,129 @@ +package registry + +import ( + 
"encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/pkg/log" +) + +// scans string for api version in the URL path. returns the trimmed hostname, if version found, string and API version. +func scanForApiVersion(hostname string) (string, APIVersion) { + var ( + chunks []string + apiVersionStr string + ) + if strings.HasSuffix(hostname, "/") { + chunks = strings.Split(hostname[:len(hostname)-1], "/") + apiVersionStr = chunks[len(chunks)-1] + } else { + chunks = strings.Split(hostname, "/") + apiVersionStr = chunks[len(chunks)-1] + } + for k, v := range apiVersions { + if apiVersionStr == v { + hostname = strings.Join(chunks[:len(chunks)-1], "/") + return hostname, k + } + } + return hostname, DefaultAPIVersion +} + +func NewEndpoint(hostname string) (*Endpoint, error) { + var ( + endpoint Endpoint + trimmedHostname string + err error + ) + if !strings.HasPrefix(hostname, "http") { + hostname = "https://" + hostname + } + trimmedHostname, endpoint.Version = scanForApiVersion(hostname) + endpoint.URL, err = url.Parse(trimmedHostname) + if err != nil { + return nil, err + } + + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + log.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err) + // TODO: Check if http fallback is enabled + endpoint.URL.Scheme = "http" + if _, err = endpoint.Ping(); err != nil { + return nil, errors.New("Invalid Registry endpoint: " + err.Error()) + } + } + + return &endpoint, nil +} + +type Endpoint struct { + URL *url.URL + Version APIVersion +} + +// Get the formated URL for the root of this registry Endpoint +func (e Endpoint) String() string { + return fmt.Sprintf("%s/v%d/", e.URL.String(), e.Version) +} + +func (e Endpoint) VersionString(version APIVersion) string { + return fmt.Sprintf("%s/v%d/", e.URL.String(), version) +} + +func (e Endpoint) Ping() (RegistryInfo, error) { + if e.String() == IndexServerAddress() { + // Skip the check, we now this one is valid + // (and we never want to fallback to http in case of error) + return RegistryInfo{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.String()+"_ping", nil) + if err != nil { + return RegistryInfo{Standalone: false}, err + } + + resp, _, err := doRequest(req, nil, ConnectTimeout) + if err != nil { + return RegistryInfo{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return RegistryInfo{Standalone: false}, fmt.Errorf("Error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := RegistryInfo{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + log.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + log.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + log.Debugf("RegistryInfo.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + log.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". 
+ if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // the header is set, but it is neither "true" nor "1", so treat the registry as not standalone + info.Standalone = false + } + log.Debugf("RegistryInfo.Standalone: %t", info.Standalone) + return info, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/httpfactory.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/httpfactory.go new file mode 100644 index 0000000000..4c78436094 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/httpfactory.go @@ -0,0 +1,46 @@ +package registry + +import ( + "runtime" + + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/utils" +) + +func HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { + // FIXME: this replicates the 'info' job. + httpVersion := make([]utils.VersionInfo, 0, 4) + httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION}) + httpVersion = append(httpVersion, &simpleVersionInfo{"go", runtime.Version()}) + httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()}) + } + httpVersion = append(httpVersion, &simpleVersionInfo{"os", runtime.GOOS}) + httpVersion = append(httpVersion, &simpleVersionInfo{"arch", runtime.GOARCH}) + ud := utils.NewHTTPUserAgentDecorator(httpVersion...) + md := &utils.HTTPMetaHeadersDecorator{ + Headers: metaHeaders, + } + factory := utils.NewHTTPRequestFactory(ud, md) + return factory +} + +// simpleVersionInfo is a simple implementation of +// the interface VersionInfo, which is used +// to provide version information for some product, +// component, etc. It stores the product name and the version +// as strings and returns them on calls to Name() and Version().
+type simpleVersionInfo struct { + name string + version string +} + +func (v *simpleVersionInfo) Name() string { + return v.name +} + +func (v *simpleVersionInfo) Version() string { + return v.version +} diff --git a/tests/_vendor/src/github.com/docker/docker/registry/registry.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/registry/registry.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/registry/registry.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/registry_mock_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/registry_mock_test.go new file mode 100644 index 0000000000..379dc78f47 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/registry_mock_test.go @@ -0,0 +1,362 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/gorilla/mux" + + "github.com/docker/docker/pkg/log" +) + +var ( + testHttpServer *httptest.Server + testLayers = map[string]map[string]string{ + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { + "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "PortSpecs":null,"Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", + "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, + 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, + 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, + 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, + 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, + 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, + 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, + 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, + 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, + }), + }, + "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { + "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + 
"PortSpecs":null,"Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", + "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, + 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, + 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, + 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, + 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, + 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, + 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, + 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, + 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, + }), + }, + } + testRepositories = map[string]map[string]string{ + "foo42/bar": { + "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + }, + } +) + +func init() { + r := mux.NewRouter() + + // /v1/ + r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") + r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") + r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") + r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") + r.HandleFunc("/v1/search", handlerSearch).Methods("GET") + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHttpServer = httptest.NewServer(handlerAccessLog(r)) +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + log.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func makeURL(req string) string { + return testHttpServer.URL + req +} + +func writeHeaders(w http.ResponseWriter) { + h := w.Header() + h.Add("Server", "docker-tests/mock") + h.Add("Expires", "-1") + h.Add("Content-Type", "application/json") + h.Add("Pragma", "no-cache") + h.Add("Cache-Control", "no-cache") + h.Add("X-Docker-Registry-Version", "0.0.0") + h.Add("X-Docker-Registry-Config", "mock") +} + +func writeResponse(w http.ResponseWriter, message interface{}, code int) { + writeHeaders(w) + 
w.WriteHeader(code) + body, err := json.Marshal(message) + if err != nil { + io.WriteString(w, err.Error()) + return + } + w.Write(body) +} + +func readJSON(r *http.Request, dest interface{}) error { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + return json.Unmarshal(body, dest) +} + +func apiError(w http.ResponseWriter, message string, code int) { + body := map[string]string{ + "error": message, + } + writeResponse(w, body, code) +} + +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a == b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v != %v", a, b) + } + t.Fatal(message) +} + +func requiresAuth(w http.ResponseWriter, r *http.Request) bool { + writeCookie := func() { + value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) + cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} + http.SetCookie(w, cookie) + //FIXME(sam): this should be sent only on Index routes + value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) + w.Header().Add("X-Docker-Token", value) + } + if len(r.Cookies()) > 0 { + writeCookie() + return true + } + if len(r.Header.Get("Authorization")) > 0 { + writeCookie() + return true + } + w.Header().Add("WWW-Authenticate", "token") + apiError(w, "Wrong auth", 401) + return false +} + +func handlerGetPing(w http.ResponseWriter, r *http.Request) { + writeResponse(w, true, 200) +} + +func handlerGetImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + layer, exists := testLayers[vars["image_id"]] + if !exists { + http.NotFound(w, r) + return + } + writeHeaders(w) + layer_size := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layer_size)) + io.WriteString(w, layer[vars["action"]]) +} + +func handlerPutImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + image_id := vars["image_id"] + action := vars["action"] + layer, exists := testLayers[image_id] + if !exists { + if action != "json" { + http.NotFound(w, r) + return + } + layer = make(map[string]string) + testLayers[image_id] = layer + } + if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { + if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { + apiError(w, "Wrong checksum", 400) + return + } + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + apiError(w, fmt.Sprintf("Error: %s", err), 500) + return + } + layer[action] = string(body) + writeResponse(w, true, 200) +} + +func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + repositoryName := mux.Vars(r)["repository"] + tags, exists := testRepositories[repositoryName] + if !exists { + apiError(w, "Repository not found", 404) + return + } + if r.Method == "DELETE" { + delete(testRepositories, repositoryName) + writeResponse(w, true, 200) + return + } + writeResponse(w, tags, 200) +} + +func handlerGetTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + repositoryName := vars["repository"] + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName] + if !exists { + apiError(w, "Repository not found", 404) + return + } + tag, exists := tags[tagName] + if !exists { + apiError(w, "Tag not found", 404) + return + } + writeResponse(w, tag, 200) +} + +func handlerPutTag(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := 
mux.Vars(r) + repositoryName := vars["repository"] + tagName := vars["tag"] + tags, exists := testRepositories[repositoryName] + if !exists { + tags = make(map[string]string) + testRepositories[repositoryName] = tags + } + tagValue := "" + readJSON(r, &tagValue) + tags[tagName] = tagValue + writeResponse(w, true, 200) +} + +func handlerUsers(w http.ResponseWriter, r *http.Request) { + code := 200 + if r.Method == "POST" { + code = 201 + } else if r.Method == "PUT" { + code = 204 + } + writeResponse(w, "", code) +} + +func handlerImages(w http.ResponseWriter, r *http.Request) { + u, _ := url.Parse(testHttpServer.URL) + w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) + w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) + if r.Method == "PUT" { + if strings.HasSuffix(r.URL.Path, "images") { + writeResponse(w, "", 204) + return + } + writeResponse(w, "", 200) + return + } + if r.Method == "DELETE" { + writeResponse(w, "", 204) + return + } + images := []map[string]string{} + for image_id, layer := range testLayers { + image := make(map[string]string) + image["id"] = image_id + image["checksum"] = layer["checksum_tarsum"] + image["Tag"] = "latest" + images = append(images, image) + } + writeResponse(w, images, 200) +} + +func handlerAuth(w http.ResponseWriter, r *http.Request) { + writeResponse(w, "OK", 200) +} + +func handlerSearch(w http.ResponseWriter, r *http.Request) { + result := &SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) +} + +func TestPing(t *testing.T) { + res, err := http.Get(makeURL("/v1/_ping")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, res.StatusCode, 200, "") + assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", + "This is not a Mocked Registry") +} + +/* Uncomment this to test the Mocked Registry locally with curl + * WARNING: Don't push to the repos while this is uncommented; it'll block the tests + * +func TestWait(t *testing.T) { + log.Println("Test HTTP server ready and waiting:", testHttpServer.URL) + c := make(chan int) + <-c +} + +//*/ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/registry_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/registry_test.go new file mode 100644 index 0000000000..ab4178126a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/registry_test.go @@ -0,0 +1,318 @@ +package registry + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/docker/docker/utils" +) + +var ( + IMAGE_ID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + TOKEN = []string{"fake-token"} + REPO = "foo42/bar" +) + +func spawnTestRegistrySession(t *testing.T) *Session { + authConfig := &AuthConfig{} + endpoint, err := NewEndpoint(makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + r, err := NewSession(authConfig, utils.NewHTTPRequestFactory(), endpoint, true) + if err != nil { + t.Fatal(err) + } + return r +} + +func TestPingRegistryEndpoint(t *testing.T) { + ep, err := NewEndpoint(makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + regInfo, err := ep.Ping() + if err != nil { + t.Fatal(err) + } + assertEqual(t, regInfo.Standalone, true, "Expected standalone to be true (default)") +} + +func TestGetRemoteHistory(t *testing.T) { + r := spawnTestRegistrySession(t) + hist, err := r.GetRemoteHistory(IMAGE_ID, makeURL("/v1/"), TOKEN) + if err != nil {
+ t.Fatal(err) + } + assertEqual(t, len(hist), 2, "Expected 2 images in history") + assertEqual(t, hist[0], IMAGE_ID, "Expected "+IMAGE_ID+" as first ancestry") + assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "Unexpected second ancestry") +} + +func TestLookupRemoteImage(t *testing.T) { + r := spawnTestRegistrySession(t) + found := r.LookupRemoteImage(IMAGE_ID, makeURL("/v1/"), TOKEN) + assertEqual(t, found, true, "Expected remote lookup to succeed") + found = r.LookupRemoteImage("abcdef", makeURL("/v1/"), TOKEN) + assertEqual(t, found, false, "Expected remote lookup to fail") +} + +func TestGetRemoteImageJSON(t *testing.T) { + r := spawnTestRegistrySession(t) + json, size, err := r.GetRemoteImageJSON(IMAGE_ID, makeURL("/v1/"), TOKEN) + if err != nil { + t.Fatal(err) + } + assertEqual(t, size, 154, "Expected size 154") + if len(json) <= 0 { + t.Fatal("Expected non-empty json") + } + + _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), TOKEN) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteImageLayer(t *testing.T) { + r := spawnTestRegistrySession(t) + data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN, 0) + if err != nil { + t.Fatal(err) + } + if data == nil { + t.Fatal("Expected non-nil data result") + } + + _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), TOKEN, 0) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteTags(t *testing.T) { + r := spawnTestRegistrySession(t) + tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, TOKEN) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(tags), 1, "Expected one tag") + assertEqual(t, tags["latest"], IMAGE_ID, "Expected tag latest to map to "+IMAGE_ID) + + _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", TOKEN) + if err == nil { + t.Fatal("Expected error when fetching tags for bogus repo") + } +} + +func TestGetRepositoryData(t *testing.T) { + r := spawnTestRegistrySession(t) + parsedUrl, err := url.Parse(makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + host := "http://" + parsedUrl.Host + "/v1/" + data, err := r.GetRepositoryData("foo42/bar") + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") + assertEqual(t, len(data.Endpoints), 2, + fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) + assertEqual(t, data.Endpoints[0], host, + fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) + assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/", + fmt.Sprintf("Expected second endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) + +} + +func TestPushImageJSONRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := &ImgData{ + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + } + + err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), TOKEN) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageLayerRegistry(t *testing.T) { + r := spawnTestRegistrySession(t) + layer := strings.NewReader("") + _, _, err := r.PushImageLayerRegistry(IMAGE_ID, layer, makeURL("/v1/"), TOKEN, []byte{}) + if err != nil { + t.Fatal(err) + } +} + +func TestResolveRepositoryName(t *testing.T) { + _, _, err :=
ResolveRepositoryName("https://github.com/docker/docker") + assertEqual(t, err, ErrInvalidRepositoryName, "Expected error invalid repo name") + ep, repo, err := ResolveRepositoryName("fooo/bar") + if err != nil { + t.Fatal(err) + } + assertEqual(t, ep, IndexServerAddress(), "Expected endpoint to be index server address") + assertEqual(t, repo, "fooo/bar", "Expected resolved repo to be foo/bar") + + u := makeURL("")[7:] + ep, repo, err = ResolveRepositoryName(u + "/private/moonbase") + if err != nil { + t.Fatal(err) + } + assertEqual(t, ep, u, "Expected endpoint to be "+u) + assertEqual(t, repo, "private/moonbase", "Expected endpoint to be private/moonbase") + + ep, repo, err = ResolveRepositoryName("ubuntu-12.04-base") + if err != nil { + t.Fatal(err) + } + assertEqual(t, ep, IndexServerAddress(), "Expected endpoint to be "+IndexServerAddress()) + assertEqual(t, repo, "ubuntu-12.04-base", "Expected endpoint to be ubuntu-12.04-base") +} + +func TestPushRegistryTag(t *testing.T) { + r := spawnTestRegistrySession(t) + err := r.PushRegistryTag("foo42/bar", IMAGE_ID, "stable", makeURL("/v1/"), TOKEN) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageJSONIndex(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := []*ImgData{ + { + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + }, + { + ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + }, + } + repoData, err := r.PushImageJSONIndex("foo42/bar", imgData, false, nil) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } + repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint.String()}) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } +} + +func TestSearchRepositories(t *testing.T) { + r := spawnTestRegistrySession(t) + results, err := r.SearchRepositories("fakequery") + if err != nil { + t.Fatal(err) + } + if results == nil { + t.Fatal("Expected non-nil SearchResults object") + } + assertEqual(t, results.NumResults, 1, "Expected 1 search results") + assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") + assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' a ot hae 42 stars") +} + +func TestValidRepositoryName(t *testing.T) { + if err := validateRepositoryName("docker/docker"); err != nil { + t.Fatal(err) + } + // Support 64-byte non-hexadecimal names (hexadecimal names are forbidden) + if err := validateRepositoryName("thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev"); err != nil { + t.Fatal(err) + } + if err := validateRepositoryName("docker/Docker"); err == nil { + t.Log("Repository name should be invalid") + t.Fail() + } + if err := validateRepositoryName("docker///docker"); err == nil { + t.Log("Repository name should be invalid") + t.Fail() + } + if err := validateRepositoryName("1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a"); err == nil { + t.Log("Repository name should be invalid, 64-byte hexadecimal names forbidden") + t.Fail() + } +} + +func TestTrustedLocation(t *testing.T) { + for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { + req, _ := http.NewRequest("GET", url, nil) + 
if trustedLocation(req) == true { + t.Fatalf("'%s' shouldn't be detected as a trusted location", url) + } + } + + for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == false { + t.Fatalf("'%s' should be detected as a trusted location", url) + } + } +} + +func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { + for _, urls := range [][]string{ + {"http://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "http://bar.docker.com"}, + {"https://foo.docker.io", "https://example.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 1 { + t.Fatalf("Expected 1 header, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "" { + t.Fatal("'Authorization' should be empty") + } + } + + for _, urls := range [][]string{ + {"https://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "https://bar.docker.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 2 { + t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "super_secret" { + t.Fatal("'Authorization' should be 'super_secret'") + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/service.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/service.go new file mode 100644 index 0000000000..f7b353000e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/service.go @@ -0,0 +1,115 @@ +package registry + +import ( + "github.com/docker/docker/engine" +) + +// Service exposes registry capabilities in the standard Engine +// interface. Once installed, it extends the engine with the +// following calls: +// +// 'auth': Authenticate against the public registry +// 'search': Search for images on the public registry +// 'pull': Download images from any registry (TODO) +// 'push': Upload images to any registry (TODO) +type Service struct { +} + +// NewService returns a new instance of Service ready to be +// installed on an engine. +func NewService() *Service { + return &Service{} +} + +// Install installs registry capabilities to eng. +func (s *Service) Install(eng *engine.Engine) error { + eng.Register("auth", s.Auth) + eng.Register("search", s.Search) + return nil +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was successful. +// It can be used to verify the validity of a client's credentials.
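Given the job-based doc comments above, this is presumably how a host wires the service into an engine and runs one of the registered jobs (engine API usage inferred from the docker 1.3-era engine package; the snippet is illustrative, not part of the vendored file):

```go
package main

import (
	"os"

	"github.com/docker/docker/engine"
	"github.com/docker/docker/registry"
)

func main() {
	eng := engine.New()
	// Install registers the 'auth' and 'search' jobs on the engine.
	if err := registry.NewService().Install(eng); err != nil {
		panic(err)
	}

	// Run a search; results stream to the job's stdout as a star-sorted table.
	job := eng.Job("search", "etcd")
	job.Stdout.Add(os.Stdout)
	if err := job.Run(); err != nil {
		panic(err)
	}
}
```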
+func (s *Service) Auth(job *engine.Job) engine.Status { + var ( + err error + authConfig = &AuthConfig{} + ) + + job.GetenvJson("authConfig", authConfig) + // TODO: this is only done here because auth and registry need to be merged into one pkg + if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() { + endpoint, err := NewEndpoint(addr) + if err != nil { + return job.Error(err) + } + if _, err := endpoint.Ping(); err != nil { + return job.Error(err) + } + authConfig.ServerAddress = endpoint.String() + } + status, err := Login(authConfig, HTTPRequestFactory(nil)) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", status) + return engine.StatusOK +} + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. +// +// Argument syntax: search TERM +// +// Option environment: +// 'authConfig': json-encoded credentials to authenticate against the registry. +// The search extends to images only accessible via the credentials. +// +// 'metaHeaders': extra HTTP headers to include in the request to the registry. +// The headers should be passed as a json-encoded dictionary. +// +// Output: +// Results are sent as a collection of structured messages (using engine.Table). +// Each result is sent as a separate message. +// Results are ordered by number of stars on the public registry. +func (s *Service) Search(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s TERM", job.Name) + } + var ( + term = job.Args[0] + metaHeaders = map[string][]string{} + authConfig = &AuthConfig{} + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", metaHeaders) + + hostname, term, err := ResolveRepositoryName(term) + if err != nil { + return job.Error(err) + } + endpoint, err := NewEndpoint(hostname) + if err != nil { + return job.Error(err) + } + r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), endpoint, true) + if err != nil { + return job.Error(err) + } + results, err := r.SearchRepositories(term) + if err != nil { + return job.Error(err) + } + outs := engine.NewTable("star_count", 0) + for _, result := range results.Results { + out := &engine.Env{} + out.Import(result) + outs.Add(out) + } + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/session.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/session.go new file mode 100644 index 0000000000..5067b8d5de --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/session.go @@ -0,0 +1,617 @@ +package registry + +import ( + "bytes" + "crypto/sha256" + _ "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/utils" +) + +type Session struct { + authConfig *AuthConfig + reqFactory *utils.HTTPRequestFactory + indexEndpoint *Endpoint + jar *cookiejar.Jar + timeout TimeoutType +} + +func NewSession(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, endpoint *Endpoint, timeout bool) (r *Session, err error) { + r = &Session{ + authConfig: authConfig, + indexEndpoint: endpoint, + } + + if timeout { + r.timeout = ReceiveTimeout + } + + 
r.jar, err = cookiejar.New(nil) + if err != nil { + return nil, err + } + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside our requests. + if r.indexEndpoint.VersionString(1) != IndexServerAddress() && r.indexEndpoint.URL.Scheme == "https" { + info, err := r.indexEndpoint.Ping() + if err != nil { + return nil, err + } + if info.Standalone { + log.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", r.indexEndpoint.String()) + dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password) + factory.AddDecorator(dec) + } + } + + r.reqFactory = factory + return r, nil +} + +func (r *Session) doRequest(req *http.Request) (*http.Response, *http.Client, error) { + return doRequest(req, r.jar, r.timeout) +} + +// Retrieve the history of a given image from the Registry. +// Returns a list of the parents' JSON (requested image included) +func (r *Session) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) { + req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("Error while reading the http response: %s", err) + } + + log.Debugf("Ancestry: %s", jsonString) + history := new([]string) + if err := json.Unmarshal(jsonString, history); err != nil { + return nil, err + } + return *history, nil +} + +// Check if an image exists in the Registry +// TODO: This method should return the errors instead of masking them and returning false +func (r *Session) LookupRemoteImage(imgID, registry string, token []string) bool { + + req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) + if err != nil { + log.Errorf("Error in LookupRemoteImage %s", err) + return false + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + log.Errorf("Error in LookupRemoteImage %s", err) + return false + } + res.Body.Close() + return res.StatusCode == 200 +} + +// Retrieve an image from the Registry.
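+// Returns the raw image JSON together with the image size reported in the
+// X-Docker-Size header (-1 when that header is absent).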
+func (r *Session) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) { + // Get the JSON + req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := -1 + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.Atoi(hdr) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +func (r *Session) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) { + var ( + retries = 5 + statusCode = 0 + client *http.Client + res *http.Response + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := r.reqFactory.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %s\n", err) + } + setTokenAuth(req, token) + for i := 1; i <= retries; i++ { + statusCode = 0 + res, client, err = r.doRequest(req) + if err != nil { + log.Debugf("Error contacting registry: %s", err) + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + if i == retries { + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + time.Sleep(time.Duration(i) * 5 * time.Second) + continue + } + break + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + log.Debugf("server supports resume") + return httputils.ResumableRequestReaderWithInitialResponse(client, req, 5, imgSize, res), nil + } + log.Debugf("server doesn't support resume") + return res.Body, nil +} + +func (r *Session) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) { + if strings.Count(repository, "/") == 0 { + // This will be removed once the Registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) + req, err := r.reqFactory.NewRequest("GET", endpoint, nil) + + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + + log.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode != 200 && res.StatusCode != 404 { + continue + } else if res.StatusCode == 404 { + return nil, fmt.Errorf("Repository not found") + } + + result := make(map[string]string) + rawJSON, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + if err := json.Unmarshal(rawJSON, &result); err != nil { + return nil, err + } + return result, nil + } + return nil, fmt.Errorf("Could not reach any registry endpoint") +} + +func 
buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedUrl, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedUrl.Scheme + // The Registry's URL scheme has to match the Index's + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + +func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote) + + log.Debugf("[registry] Calling GET %s", repositoryTarget) + + req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil) + if err != nil { + return nil, err + } + if r.authConfig != nil && len(r.authConfig.Username) > 0 { + req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) + } + req.Header.Set("X-Docker-Token", "true") + + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode == 401 { + return nil, errLoginRequired + } + // TODO: Right now we're ignoring checksums in the response body. + // In the future, we need to use them to check image validity. + if res.StatusCode != 200 { + return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } + + var tokens []string + if res.Header.Get("X-Docker-Token") != "" { + tokens = res.Header["X-Docker-Token"] + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + checksumsJSON, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + remoteChecksums := []*ImgData{} + if err := json.Unmarshal(checksumsJSON, &remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData) + for _, elem := range remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + Tokens: tokens, + }, nil +} + +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error { + + log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum") + + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil) + if err != nil { + return err + } + setTokenAuth(req, token) + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, _, err := r.doRequest(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already
exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody) + } + return nil +} + +// Push a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error { + + log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/json") + + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + setTokenAuth(req, token) + + res, _, err := r.doRequest(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody), res) + } + return nil +} + +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + + log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer") + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %s", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// push a tag on the registry. 
+// Remote has the format '<user>/<repo>' +func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token []string) error { + // "jsonify" the string + revision = "\"" + revision + "\"" + path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag) + + req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + setTokenAuth(req, token) + req.ContentLength = int64(len(revision)) + res, _, err := r.doRequest(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 && res.StatusCode != 201 { + return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res) + } + return nil +} + +func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { + cleanImgList := []*ImgData{} + + if validate { + for _, elem := range imgList { + if elem.Checksum != "" { + cleanImgList = append(cleanImgList, elem) + } + } + } else { + cleanImgList = imgList + } + + imgListJSON, err := json.Marshal(cleanImgList) + if err != nil { + return nil, err + } + var suffix string + if validate { + suffix = "images" + } + u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix) + log.Debugf("[registry] PUT %s", u) + log.Debugf("Image list pushed to index:\n%s", imgListJSON) + req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON)) + if err != nil { + return nil, err + } + req.Header.Add("Content-type", "application/json") + req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) + req.ContentLength = int64(len(imgListJSON)) + req.Header.Set("X-Docker-Token", "true") + if validate { + req.Header["X-Docker-Endpoints"] = regs + } + + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + + // Redirect if necessary + for res.StatusCode >= 300 && res.StatusCode < 400 { + log.Debugf("Redirected to %s", res.Header.Get("Location")) + req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON)) + if err != nil { + return nil, err + } + req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) + req.ContentLength = int64(len(imgListJSON)) + req.Header.Set("X-Docker-Token", "true") + if validate { + req.Header["X-Docker-Endpoints"] = regs + } + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + } + + var tokens, endpoints []string + if !validate { + if res.StatusCode != 200 && res.StatusCode != 201 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody), res) + } + if res.Header.Get("X-Docker-Token") != "" { + tokens = res.Header["X-Docker-Token"] + log.Debugf("Auth token: %v", tokens) + } else { + return nil, fmt.Errorf("Index response didn't contain an access token") + } + + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("Index response didn't contain any endpoints") + } + } + if validate { + if res.StatusCode != 204 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + return nil,
utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, errBody), res) + } + } + + return &RepositoryData{ + Tokens: tokens, + Endpoints: endpoints, + }, nil +} + +func (r *Session) SearchRepositories(term string) (*SearchResults, error) { + log.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term) + req, err := r.reqFactory.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + if r.authConfig != nil && len(r.authConfig.Username) > 0 { + req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) + } + req.Header.Set("X-Docker-Token", "true") + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + } + rawData, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + result := new(SearchResults) + err = json.Unmarshal(rawData, result) + return result, err +} + +func (r *Session) GetAuthConfig(withPasswd bool) *AuthConfig { + password := "" + if withPasswd { + password = r.authConfig.Password + } + return &AuthConfig{ + Username: r.authConfig.Username, + Password: password, + Email: r.authConfig.Email, + } +} + +func setTokenAuth(req *http.Request, token []string) { + if req.Header.Get("Authorization") == "" { // Don't override + req.Header.Set("Authorization", "Token "+strings.Join(token, ",")) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/session_v2.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/session_v2.go new file mode 100644 index 0000000000..c0bc19b337 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/session_v2.go @@ -0,0 +1,390 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "strconv" + + "github.com/docker/docker/pkg/log" + "github.com/docker/docker/utils" + "github.com/gorilla/mux" +) + +func newV2RegistryRouter() *mux.Router { + router := mux.NewRouter() + + v2Router := router.PathPrefix("/v2/").Subrouter() + + // Version Info + v2Router.Path("/version").Name("version") + + // Image Manifests + v2Router.Path("/manifest/{imagename:[a-z0-9-._/]+}/{tagname:[a-zA-Z0-9-._]+}").Name("manifests") + + // List Image Tags + v2Router.Path("/tags/{imagename:[a-z0-9-._/]+}").Name("tags") + + // Download a blob + v2Router.Path("/blob/{imagename:[a-z0-9-._/]+}/{sumtype:[a-z0-9._+-]+}/{sum:[a-fA-F0-9]{4,}}").Name("downloadBlob") + + // Upload a blob + v2Router.Path("/blob/{imagename:[a-z0-9-._/]+}/{sumtype:[a-z0-9._+-]+}").Name("uploadBlob") + + // Mounting a blob in an image + v2Router.Path("/mountblob/{imagename:[a-z0-9-._/]+}/{sumtype:[a-z0-9._+-]+}/{sum:[a-fA-F0-9]{4,}}").Name("mountBlob") + + return router +} + +// APIVersion2 /v2/ +var v2HTTPRoutes = newV2RegistryRouter() + +func getV2URL(e *Endpoint, routeName string, vars map[string]string) (*url.URL, error) { + route := v2HTTPRoutes.Get(routeName) + if route == nil { + return nil, fmt.Errorf("unknown registry v2 route name: %q", routeName) + } + + varReplace := make([]string, 0, len(vars)*2) + for key, val := range vars { + varReplace = append(varReplace, key, val) + } + + routePath, err := route.URLPath(varReplace...)
+ if err != nil { + return nil, fmt.Errorf("unable to make registry route %q with vars %v: %s", routeName, vars, err) + } + u, err := url.Parse(REGISTRYSERVER) + if err != nil { + return nil, fmt.Errorf("invalid registry url: %s", err) + } + + return &url.URL{ + Scheme: u.Scheme, + Host: u.Host, + Path: routePath.Path, + }, nil +} + +// V2 Provenance POC + +func (r *Session) GetV2Version(token []string) (*RegistryInfo, error) { + routeURL, err := getV2URL(r.indexEndpoint, "version", nil) + if err != nil { + return nil, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d fetching Version", res.StatusCode), res) + } + + decoder := json.NewDecoder(res.Body) + versionInfo := new(RegistryInfo) + + err = decoder.Decode(versionInfo) + if err != nil { + return nil, fmt.Errorf("unable to decode GetV2Version JSON response: %s", err) + } + + return versionInfo, nil +} + +// +// 1) Check if TarSum of each layer exists /v2/ +// 1.a) if 200, continue +// 1.b) if 300, then push the +// 1.c) if anything else, err +// 2) PUT the created/signed manifest +// +func (r *Session) GetV2ImageManifest(imageName, tagName string, token []string) ([]byte, error) { + vars := map[string]string{ + "imagename": imageName, + "tagname": tagName, + } + + routeURL, err := getV2URL(r.indexEndpoint, "manifests", vars) + if err != nil { + return nil, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } else if res.StatusCode == 404 { + return nil, ErrDoesNotExist + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res) + } + + buf, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("Error while reading the http response: %s", err) + } + return buf, nil +} + +// - Succeeded to mount for this image scope +// - Failed with no error (So continue to Push the Blob) +// - Failed with error +func (r *Session) PostV2ImageMountBlob(imageName, sumType, sum string, token []string) (bool, error) { + vars := map[string]string{ + "imagename": imageName, + "sumtype": sumType, + "sum": sum, + } + + routeURL, err := getV2URL(r.indexEndpoint, "mountBlob", vars) + if err != nil { + return false, err + } + + method := "POST" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return false, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return false, err + } + res.Body.Close() // close early, since we're not needing a body on this call .. yet? 
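+ // The registry signals the outcome through the status code alone: 200 means
+ // the blob is already mounted for this image, 300 means it still has to be pushed.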
+ switch res.StatusCode { + case 200: + // return something indicating no push needed + return true, nil + case 300: + // return something indicating blob push needed + return false, nil + } + return false, fmt.Errorf("Failed to mount %q - %s:%s : %d", imageName, sumType, sum, res.StatusCode) +} + +func (r *Session) GetV2ImageBlob(imageName, sumType, sum string, blobWrtr io.Writer, token []string) error { + vars := map[string]string{ + "imagename": imageName, + "sumtype": sumType, + "sum": sum, + } + + routeURL, err := getV2URL(r.indexEndpoint, "downloadBlob", vars) + if err != nil { + return err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return errLoginRequired + } + return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res) + } + + _, err = io.Copy(blobWrtr, res.Body) + return err +} + +func (r *Session) GetV2ImageBlobReader(imageName, sumType, sum string, token []string) (io.ReadCloser, int64, error) { + vars := map[string]string{ + "imagename": imageName, + "sumtype": sumType, + "sum": sum, + } + + routeURL, err := getV2URL(r.indexEndpoint, "downloadBlob", vars) + if err != nil { + return nil, 0, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, 0, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, 0, err + } + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, 0, errLoginRequired + } + return nil, 0, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res) + } + lenStr := res.Header.Get("Content-Length") + l, err := strconv.ParseInt(lenStr, 10, 64) + if err != nil { + return nil, 0, err + } + + return res.Body, l, err +} + +// Push the image to the server for storage. +// 'layer' is an uncompressed reader of the blob to be pushed. +// The server will generate its own checksum calculation.
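+//
+// A hypothetical call might look like this (all names here are
+// placeholders, not upstream documentation):
+//
+//	sum, err := session.PutV2ImageBlob("library/ubuntu", "tarsum", layerReader, token)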
+func (r *Session) PutV2ImageBlob(imageName, sumType string, blobRdr io.Reader, token []string) (serverChecksum string, err error) { + vars := map[string]string{ + "imagename": imageName, + "sumtype": sumType, + } + + routeURL, err := getV2URL(r.indexEndpoint, "uploadBlob", vars) + if err != nil { + return "", err + } + + method := "PUT" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + req, err := r.reqFactory.NewRequest(method, routeURL.String(), blobRdr) + if err != nil { + return "", err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return "", err + } + defer res.Body.Close() + if res.StatusCode != 201 { + if res.StatusCode == 401 { + return "", errLoginRequired + } + return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob", res.StatusCode, imageName), res) + } + + type sumReturn struct { + Checksum string `json:"checksum"` + } + + decoder := json.NewDecoder(res.Body) + var sumInfo sumReturn + + err = decoder.Decode(&sumInfo) + if err != nil { + return "", fmt.Errorf("unable to decode PutV2ImageBlob JSON response: %s", err) + } + + // XXX this is a json struct from the registry, with its checksum + return sumInfo.Checksum, nil +} + +// Finally Push the (signed) manifest of the blobs we've just pushed +func (r *Session) PutV2ImageManifest(imageName, tagName string, manifestRdr io.Reader, token []string) error { + vars := map[string]string{ + "imagename": imageName, + "tagname": tagName, + } + + routeURL, err := getV2URL(r.indexEndpoint, "manifests", vars) + if err != nil { + return err + } + + method := "PUT" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + req, err := r.reqFactory.NewRequest(method, routeURL.String(), manifestRdr) + if err != nil { + return err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 201 { + if res.StatusCode == 401 { + return errLoginRequired + } + return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res) + } + + return nil +} + +// Given a repository name, returns a json array of string tags +func (r *Session) GetV2RemoteTags(imageName string, token []string) ([]string, error) { + vars := map[string]string{ + "imagename": imageName, + } + + routeURL, err := getV2URL(r.indexEndpoint, "tags", vars) + if err != nil { + return nil, err + } + + method := "GET" + log.Debugf("[registry] Calling %q %s", method, routeURL.String()) + + req, err := r.reqFactory.NewRequest(method, routeURL.String(), nil) + if err != nil { + return nil, err + } + setTokenAuth(req, token) + res, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errLoginRequired + } else if res.StatusCode == 404 { + return nil, ErrDoesNotExist + } + return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s", res.StatusCode, imageName), res) + } + + decoder := json.NewDecoder(res.Body) + var tags []string + err = decoder.Decode(&tags) + if err != nil { + return nil, fmt.Errorf("Error while decoding the http response: %s", err) + } + return tags, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/registry/types.go b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/types.go new file mode 100644 index 0000000000..3b429f19af --- /dev/null +++ 
b/tests/Godeps/_workspace/src/github.com/docker/docker/registry/types.go @@ -0,0 +1,67 @@ +package registry + +type SearchResult struct { + StarCount int `json:"star_count"` + IsOfficial bool `json:"is_official"` + Name string `json:"name"` + IsTrusted bool `json:"is_trusted"` + Description string `json:"description"` +} + +type SearchResults struct { + Query string `json:"query"` + NumResults int `json:"num_results"` + Results []SearchResult `json:"results"` +} + +type RepositoryData struct { + ImgList map[string]*ImgData + Endpoints []string + Tokens []string +} + +type ImgData struct { + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +type RegistryInfo struct { + Version string `json:"version"` + Standalone bool `json:"standalone"` +} + +type FSLayer struct { + BlobSum string `json:"blobSum"` +} + +type ManifestHistory struct { + V1Compatibility string `json:"v1Compatibility"` +} + +type ManifestData struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []*FSLayer `json:"fsLayers"` + History []*ManifestHistory `json:"history"` + SchemaVersion int `json:"schemaVersion"` +} + +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +var DefaultAPIVersion APIVersion = APIVersion1 +var apiVersions = map[APIVersion]string{ + 1: "v1", + 2: "v2", +} + +const ( + APIVersion1 = iota + 1 + APIVersion2 +) diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go new file mode 100644 index 0000000000..5c1bf46575 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go @@ -0,0 +1,60 @@ +package runconfig + +// Compare two Config structs.
Do not compare the "Image" or "Hostname" fields +// If OpenStdin is set, then it differs +func Compare(a, b *Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.Memory != b.Memory || + a.MemorySwap != b.MemorySwap || + a.CpuShares != b.CpuShares || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty { + return false + } + if len(a.Cmd) != len(b.Cmd) || + len(a.Env) != len(b.Env) || + len(a.PortSpecs) != len(b.PortSpecs) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for i := 0; i < len(a.PortSpecs); i++ { + if a.PortSpecs[i] != b.PortSpecs[i] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/tests/_vendor/src/github.com/docker/docker/runconfig/config.go b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/runconfig/config.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go new file mode 100644 index 0000000000..d94ec4ec55 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go @@ -0,0 +1,264 @@ +package runconfig + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/docker/nat" +) + +func parse(t *testing.T, args string) (*Config, *HostConfig, error) { + config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " "), nil) + return config, hostConfig, err +} + +func mustParse(t *testing.T, args string) (*Config, *HostConfig) { + config, hostConfig, err := parse(t, args) + if err != nil { + t.Fatal(err) + } + return config, hostConfig +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return fmt.Errorf("strings don't match") +} + +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } + + if _, _, err := parse(t, "--link a"); err == nil { + t.Fatalf("Error parsing links.
`--link a` should be an error but is not") + } + if _, _, err := parse(t, "--link"); err == nil { + t.Fatalf("Error parsing links. `--link` should be an error but is not") + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + + if _, _, err := parse(t, "-a"); err == nil { + t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid"); err == nil { + t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdin -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-d --rm"); err == nil { + t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") + } +} + +func TestParseRunVolumes(t *testing.T) { + if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) + } else if _, exists := config.Volumes["/var"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes.
Received %v", config.Volumes) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containerTmp. Received %v", hostConfig.Binds) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp", "/hostVar:/containerVar") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds) + } + + if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro", "/hostVar:/containerVar:rw") != nil { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds) + } + + if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containerTmp. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/containerVar"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-bound. Received %v", hostConfig.Binds) + } else if len(config.Volumes) != 0 { + t.Fatalf("Error parsing volume flags, without volume, no volume should be present.
Received %v", config.Volumes) + } + + if _, _, err := parse(t, "-v /"); err == nil { + t.Fatalf("Expected error, but got none") + } + + if _, _, err := parse(t, "-v /:/"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") + } + if _, _, err := parse(t, "-v"); err == nil { + t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:ro"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") + } + if _, _, err := parse(t, "-v :"); err == nil { + t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") + } + if _, _, err := parse(t, "-v ::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") + } +} + +func TestCompare(t *testing.T) { + volumes1 := make(map[string]struct{}) + volumes1["/test1"] = struct{}{} + config1 := Config{ + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes1, + } + config3 := Config{ + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes1, + } + volumes2 := make(map[string]struct{}) + volumes2["/test2"] = struct{}{} + config5 := Config{ + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes2, + } + if Compare(&config1, &config3) { + t.Fatalf("Compare should return false, PortSpecs are different") + } + if Compare(&config1, &config5) { + t.Fatalf("Compare should return false, Volumes are different") + } + if !Compare(&config1, &config1) { + t.Fatalf("Compare should return true") + } +} + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + configImage := &Config{ + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, + } + + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &Config{ + PortSpecs: []string{"3333:2222", "3333:3333"}, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := Merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env vars, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v
!= "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &Config{ + ExposedPorts: ports, + } + + if err := Merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs) + } + } + +} diff --git a/tests/_vendor/src/github.com/docker/docker/runconfig/exec.go b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/runconfig/exec.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go diff --git a/tests/_vendor/src/github.com/docker/docker/runconfig/hostconfig.go b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/runconfig/hostconfig.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go new file mode 100644 index 0000000000..0c60d1df0b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go @@ -0,0 +1,104 @@ +package runconfig + +import ( + "strings" + + "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/log" +) + +func Merge(userConf, imageConf *Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if userConf.Memory == 0 { + userConf.Memory = imageConf.Memory + } + if userConf.MemorySwap == 0 { + userConf.MemorySwap = imageConf.MemorySwap + } + if userConf.CpuShares == 0 { + userConf.CpuShares = imageConf.CpuShares + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.PortSpecs) > 0 { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + ports, _, err := nat.ParsePortSpecs(userConf.PortSpecs) + if err != nil { + return err + } + for port := range ports { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + userConf.PortSpecs = nil + } + if len(imageConf.PortSpecs) > 0 { + // FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia. 
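+ // PortSpecs is the legacy field: fold any image-level port specs into
+ // ExposedPorts so containers built from older images keep exposing their ports.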
+ log.Debugf("Migrating image port specs to container: %s", strings.Join(imageConf.PortSpecs, ", ")) + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(imageConf.PortSpecs) + if err != nil { + return err + } + for port := range ports { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if imageEnvKey == userEnvKey { + found = true + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + } + userConf.Entrypoint = imageConf.Entrypoint + } + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + return nil +} diff --git a/tests/_vendor/src/github.com/docker/docker/runconfig/parse.go b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/runconfig/parse.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go new file mode 100644 index 0000000000..e807180d4c --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go @@ -0,0 +1,60 @@ +package runconfig + +import ( + "io/ioutil" + "testing" + + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/sysinfo" +) + +func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { + cmd := flag.NewFlagSet("run", flag.ContinueOnError) + cmd.SetOutput(ioutil.Discard) + cmd.Usage = nil + return Parse(cmd, args, sysInfo) +} + +func TestParseLxcConfOpt(t *testing.T) { + opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} + + for _, o := range opts { + k, v, err := parsers.ParseKeyValueOpt(o) + if err != nil { + t.FailNow() + } + if k != "lxc.utsname" { + t.Fail() + } + if v != "docker" { + t.Fail() + } + } +} + +func TestNetHostname(t *testing.T) { + if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}, nil); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}, nil); err != ErrConflictNetworkHostname { + t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) + } + + if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}, nil); err != ErrConflictNetworkHostname {
t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go new file mode 100644 index 0000000000..871122ed59 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go @@ -0,0 +1,36 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "strconv" +) + +func CreatePidFile(pidfile string) error { + if pidString, err := ioutil.ReadFile(pidfile); err == nil { + pid, err := strconv.Atoi(string(pidString)) + if err == nil { + if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile) + } + } + } + + file, err := os.Create(pidfile) + if err != nil { + return err + } + + defer file.Close() + + _, err = fmt.Fprintf(file, "%d", os.Getpid()) + return err +} + +func RemovePidFile(pidfile string) { + if err := os.Remove(pidfile); err != nil { + log.Printf("Error removing %s: %s", pidfile, err) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/utils/http.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/http.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/utils/http.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/utils/http.go diff --git a/tests/_vendor/src/github.com/docker/docker/utils/jsonmessage.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/utils/jsonmessage.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go new file mode 100644 index 0000000000..0ce9492c98 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go @@ -0,0 +1,38 @@ +package utils + +import ( + "testing" +) + +func TestError(t *testing.T) { + je := JSONError{404, "Not found"} + if je.Error() != "Not found" { + t.Fatalf("Expected 'Not found' got '%s'", je.Error()) + } +} + +func TestProgress(t *testing.T) { + jp := JSONProgress{} + if jp.String() != "" { + t.Fatalf("Expected empty string, got '%s'", jp.String()) + } + + expected := " 1 B" + jp2 := JSONProgress{Current: 1} + if jp2.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp2.String()) + } + + expected = "[=========================> ] 50 B/100 B" + jp3 := JSONProgress{Current: 50, Total: 100} + if jp3.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp3.String()) + } + + // this number can't be negative gh#7136 + expected = "[==============================================================>] 50 B/40 B" + jp4 := JSONProgress{Current: 50, Total: 40} + if jp4.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp4.String()) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/utils/progressreader.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/progressreader.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/utils/progressreader.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/utils/progressreader.go diff --git a/tests/_vendor/src/github.com/docker/docker/utils/random.go
b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/random.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/utils/random.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/utils/random.go diff --git a/tests/_vendor/src/github.com/docker/docker/utils/streamformatter.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/utils/streamformatter.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go new file mode 100644 index 0000000000..20610f6c01 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go @@ -0,0 +1,67 @@ +package utils + +import ( + "encoding/json" + "errors" + "reflect" + "testing" +) + +func TestFormatStream(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatStream("stream") + if string(res) != `{"stream":"stream"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatStatus(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatSimpleError(t *testing.T) { + sf := NewStreamFormatter(true) + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatJSONError(t *testing.T) { + sf := NewStreamFormatter(true) + err := &JSONError{Code: 50, Message: "Json error"} + res := sf.FormatError(err) + if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatProgress(t *testing.T) { + sf := NewStreamFormatter(true) + progress := &JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.FormatProgress("id", "action", progress) + msg := &JSONMessage{} + if err := json.Unmarshal(res, msg); err != nil { + t.Fatal(err) + } + if msg.ID != "id" { + t.Fatalf("ID must be 'id', got: %s", msg.ID) + } + if msg.Status != "action" { + t.Fatalf("Status must be 'action', got: %s", msg.Status) + } + if msg.ProgressMessage != progress.String() { + t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage) + } + if !reflect.DeepEqual(msg.Progress, progress) { + t.Fatal("Original progress does not equal progress from FormatProgress") + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/utils/timeoutconn.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/utils/timeoutconn.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn_test.go new file mode 100644 index 0000000000..d07b96cc06 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/timeoutconn_test.go @@ -0,0 +1,33 @@ +package utils + +import ( + "bufio" + "fmt" + "net" + "net/http" + "net/http/httptest" + "testing" + "time" +) + +func TestTimeoutConnRead(t *testing.T) {
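+ // Spins up a throwaway HTTP server, wraps the raw connection in a one-second
+ // timeout conn, and checks that a read with no request pending times out while
+ // a normal request/response round-trip still succeeds.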
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "hello") + })) + defer ts.Close() + conn, err := net.Dial("tcp", ts.URL[7:]) + if err != nil { + t.Fatalf("failed to create connection to %q: %v", ts.URL, err) + } + tconn := NewTimeoutConn(conn, 1*time.Second) + + if _, err = bufio.NewReader(tconn).ReadString('\n'); err == nil { + t.Fatalf("expected timeout error, got none") + } + if _, err := fmt.Fprintf(tconn, "GET / HTTP/1.0\r\n\r\n"); err != nil { + t.Errorf("unexpected error: %v", err) + } + if _, err = bufio.NewReader(tconn).ReadString('\n'); err != nil { + t.Errorf("unexpected error: %v", err) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go new file mode 100644 index 0000000000..921a8f697c --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go @@ -0,0 +1,12 @@ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd + +package utils + +import ( + "os" +) + +// TempDir returns the default directory to use for temporary files. +func TempDir(rootdir string) (string, error) { + return os.TempDir(), nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir_unix.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir_unix.go new file mode 100644 index 0000000000..30d7c3a192 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir_unix.go @@ -0,0 +1,18 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package utils + +import ( + "os" + "path/filepath" +) + +// TempDir returns the default directory to use for temporary files. +func TempDir(rootDir string) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + err := os.MkdirAll(tmpDir, 0700) + return tmpDir, err +} diff --git a/tests/_vendor/src/github.com/docker/docker/utils/utils.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/utils/utils.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go new file mode 100644 index 0000000000..ce304482b8 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go @@ -0,0 +1,99 @@ +package utils + +import ( + "os" + "testing" +) + +func TestReplaceAndAppendEnvVars(t *testing.T) { + var ( + d = []string{"HOME=/"} + o = []string{"HOME=/root", "TERM=xterm"} + ) + + env := ReplaceOrAppendEnvValues(d, o) + if len(env) != 2 { + t.Fatalf("expected len of 2 got %d", len(env)) + } + if env[0] != "HOME=/root" { + t.Fatalf("expected HOME=/root got '%s'", env[0]) + } + if env[1] != "TERM=xterm" { + t.Fatalf("expected TERM=xterm got '%s'", env[1]) + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err =
ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} diff --git a/tests/_vendor/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go new file mode 100644 index 0000000000..351eaa0e6c --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go @@ -0,0 +1,79 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar_test + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "log" + "os" +) + +func Example() { + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new tar archive. + tw := tar.NewWriter(buf) + + // Add some files to the archive. 
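+ // Each entry below is a WriteHeader call followed by a Write: the header
+ // declares Name and Size, and the body must then supply exactly Size
+ // bytes (the writer reports an error if the body overruns the declared size).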
+ var files = []struct { + Name, Body string + }{ + {"readme.txt", "This archive contains some text files."}, + {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, + {"todo.txt", "Get animal handling licence."}, + } + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + log.Fatalln(err) + } + if _, err := tw.Write([]byte(file.Body)); err != nil { + log.Fatalln(err) + } + } + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + log.Fatalln(err) + } + + // Open the tar archive for reading. + r := bytes.NewReader(buf.Bytes()) + tr := tar.NewReader(r) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + log.Fatalln(err) + } + fmt.Printf("Contents of %s:\n", hdr.Name) + if _, err := io.Copy(os.Stdout, tr); err != nil { + log.Fatalln(err) + } + fmt.Println() + } + + // Output: + // Contents of readme.txt: + // This archive contains some text files. + // Contents of gopher.txt: + // Gopher names: + // George + // Geoffrey + // Gonzo + // Contents of todo.txt: + // Get animal handling licence. +} diff --git a/tests/_vendor/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go new file mode 100644 index 0000000000..9601ffe459 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go @@ -0,0 +1,743 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" + "time" +) + +type untarTest struct { + file string + headers []*Header + cksums []string +} + +var gnuTarTest = &untarTest{ + file: "testdata/gnu.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244428340, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244436044, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + }, + cksums: []string{ + "e38b27eaccb4391bdec553a7f3ae6b2f", + "c65bd2e50a56a2138bf1716f2fd56fe9", + }, +} + +var sparseTarTest = &untarTest{ + file: "testdata/sparse-formats.tar", + headers: []*Header{ + { + Name: "sparse-gnu", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392395740, 0), + Typeflag: 0x53, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392342187, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.1", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392340456, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-1.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392337404, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "end", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 4, + ModTime: time.Unix(1392398319, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + }, + cksums: []string{ + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "b0061974914468de549a2af8ced10316", + }, +} + +var untarTests = []*untarTest{ + gnuTarTest, + sparseTarTest, + { + file: "testdata/star.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + }, + }, + { + file: "testdata/v7.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + { + Name: "small2.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + }, + }, + { + file: "testdata/pax.tar", + headers: []*Header{ + { + Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + Mode: 0664, + Uid: 1000, + Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 7, + ModTime: time.Unix(1350244992, 
23960108), + ChangeTime: time.Unix(1350244992, 23960108), + AccessTime: time.Unix(1350244992, 23960108), + Typeflag: TypeReg, + }, + { + Name: "a/b", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 0, + ModTime: time.Unix(1350266320, 910238425), + ChangeTime: time.Unix(1350266320, 910238425), + AccessTime: time.Unix(1350266320, 910238425), + Typeflag: TypeSymlink, + Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + }, + }, + }, + { + file: "testdata/nil-uid.tar", // golang.org/issue/5290 + headers: []*Header{ + { + Name: "P1050238.JPG.log", + Mode: 0664, + Uid: 0, + Gid: 0, + Size: 14, + ModTime: time.Unix(1365454838, 0), + Typeflag: TypeReg, + Linkname: "", + Uname: "eyefi", + Gname: "eyefi", + Devmajor: 0, + Devminor: 0, + }, + }, + }, + { + file: "testdata/xattrs.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 5, + ModTime: time.Unix(1386065770, 448252320), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1389782956, 794414986), + Xattrs: map[string]string{ + "user.key": "value", + "user.key2": "value2", + // Interestingly, selinux encodes the terminating null inside the xattr + "security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + }, + { + Name: "small2.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 11, + ModTime: time.Unix(1386065770, 449252304), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1386065770, 449252304), + Xattrs: map[string]string{ + "security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + }, + }, + }, +} + +func TestReader(t *testing.T) { +testLoop: + for i, test := range untarTests { + f, err := os.Open(test.file) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + continue + } + defer f.Close() + tr := NewReader(f) + for j, header := range test.headers { + hdr, err := tr.Next() + if err != nil || hdr == nil { + t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err) + f.Close() + continue testLoop + } + if !reflect.DeepEqual(*hdr, *header) { + t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v", + i, j, *hdr, *header) + } + } + hdr, err := tr.Next() + if err == io.EOF { + continue testLoop + } + if hdr != nil || err != nil { + t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err) + } + } +} + +func TestPartialRead(t *testing.T) { + f, err := os.Open("testdata/gnu.tar") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + // Read the first four bytes; Next() should skip the last byte. 
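+ // (Entries are stored in 512-byte records, so Next can discard the unread
+ // remainder of the current file plus its padding to reach the next header.)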
+ hdr, err := tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get first file: %v", err) + } + buf := make([]byte, 4) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Kilt"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } + + // Second file + hdr, err = tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get second file: %v", err) + } + buf = make([]byte, 6) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Google"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } +} + +func TestIncrementalRead(t *testing.T) { + test := gnuTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + headers := test.headers + cksums := test.cksums + nread := 0 + + // loop over all files + for ; ; nread++ { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + break + } + + // check the header + if !reflect.DeepEqual(*hdr, *headers[nread]) { + t.Errorf("Incorrect header:\nhave %+v\nwant %+v", + *hdr, headers[nread]) + } + + // read file contents in little chunks EOF, + // checksumming all the way + h := md5.New() + rdbuf := make([]uint8, 8) + for { + nr, err := tr.Read(rdbuf) + if err == io.EOF { + break + } + if err != nil { + t.Errorf("Read: unexpected error %v\n", err) + break + } + h.Write(rdbuf[0:nr]) + } + // verify checksum + have := fmt.Sprintf("%x", h.Sum(nil)) + want := cksums[nread] + if want != have { + t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want) + } + } + if nread != len(headers) { + t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread) + } +} + +func TestNonSeekable(t *testing.T) { + test := gnuTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + type readerOnly struct { + io.Reader + } + tr := NewReader(readerOnly{f}) + nread := 0 + + for ; ; nread++ { + _, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + } + + if nread != len(test.headers) { + t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread) + } +} + +func TestParsePAXHeader(t *testing.T) { + paxTests := [][3]string{ + {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths + {"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length + {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}} + for _, test := range paxTests { + key, expected, raw := test[0], test[1], test[2] + reader := bytes.NewReader([]byte(raw)) + headers, err := parsePAX(reader) + if err != nil { + t.Errorf("Couldn't parse correctly formatted headers: %v", err) + continue + } + if strings.EqualFold(headers[key], expected) { + t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected) + continue + } + trailer := make([]byte, 100) + n, err := reader.Read(trailer) + if err != io.EOF || n != 0 { + t.Error("Buffer wasn't consumed") + } + } + badHeader := bytes.NewReader([]byte("3 somelongkey=")) + if _, err := parsePAX(badHeader); err != ErrHeader { + t.Fatal("Unexpected success when parsing bad header") + } +} + +func TestParsePAXTime(t *testing.T) { + // Some valid PAX time values + timestamps := 
map[string]time.Time{ + "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case + "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value + "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value + "1350244992": time.Unix(1350244992, 0), // Low precision value + } + for input, expected := range timestamps { + ts, err := parsePAXTime(input) + if err != nil { + t.Fatal(err) + } + if !ts.Equal(expected) { + t.Fatalf("Time parsing failure %s %s", ts, expected) + } + } +} + +func TestMergePAX(t *testing.T) { + hdr := new(Header) + // Test a string, integer, and time based value. + headers := map[string]string{ + "path": "a/b/c", + "uid": "1000", + "mtime": "1350244992.023960108", + } + err := mergePAX(hdr, headers) + if err != nil { + t.Fatal(err) + } + want := &Header{ + Name: "a/b/c", + Uid: 1000, + ModTime: time.Unix(1350244992, 23960108), + } + if !reflect.DeepEqual(hdr, want) { + t.Errorf("incorrect merge: got %+v, want %+v", hdr, want) + } +} + +func TestSparseEndToEnd(t *testing.T) { + test := sparseTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + headers := test.headers + cksums := test.cksums + nread := 0 + + // loop over all files + for ; ; nread++ { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + break + } + + // check the header + if !reflect.DeepEqual(*hdr, *headers[nread]) { + t.Errorf("Incorrect header:\nhave %+v\nwant %+v", + *hdr, *headers[nread]) + } + + // read and checksum the file data + h := md5.New() + _, err = io.Copy(h, tr) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // verify checksum + have := fmt.Sprintf("%x", h.Sum(nil)) + want := cksums[nread] + if want != have { + t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want) + } + } + if nread != len(headers) { + t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread) + } +} + +type sparseFileReadTest struct { + sparseData []byte + sparseMap []sparseEntry + realSize int64 + expected []byte +} + +var sparseFileReadTests = []sparseFileReadTest{ + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + realSize: 8, + expected: []byte("ab\x00\x00\x00cde"), + }, + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + realSize: 10, + expected: []byte("ab\x00\x00\x00cde\x00\x00"), + }, + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + realSize: 8, + expected: []byte("\x00abc\x00\x00de"), + }, + { + sparseData: []byte("abcde"), + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + realSize: 10, + expected: []byte("\x00abc\x00\x00de\x00\x00"), + }, + { + sparseData: []byte(""), + sparseMap: nil, + realSize: 2, + expected: []byte("\x00\x00"), + }, +} + +func TestSparseFileReader(t *testing.T) { + for i, test := range sparseFileReadTests { + r := bytes.NewReader(test.sparseData) + nb := int64(r.Len()) + sfr := &sparseFileReader{ + rfr: &regFileReader{r: r, nb: nb}, + sp: test.sparseMap, + pos: 0, + tot: test.realSize, + } + if sfr.numBytes() != nb { + t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb) + } + buf, err := ioutil.ReadAll(sfr) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + } + if e := test.expected; !bytes.Equal(buf, e) { + t.Errorf("test %d: Contents = %v, want %v", i, buf, e) + } + if sfr.numBytes() != 0 { + t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i) + } + } +} + +func TestSparseIncrementalRead(t *testing.T) { + sparseMap := []sparseEntry{{10, 2}} + sparseData := []byte("Go") + expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00" + + r := bytes.NewReader(sparseData) + nb := int64(r.Len()) + sfr := &sparseFileReader{ + rfr: &regFileReader{r: r, nb: nb}, + sp: sparseMap, + pos: 0, + tot: int64(len(expected)), + } + + // We'll read the data 6 bytes at a time, with a hole of size 10 at + // the beginning and one of size 8 at the end. + var outputBuf bytes.Buffer + buf := make([]byte, 6) + for { + n, err := sfr.Read(buf) + if err == io.EOF { + break + } + if err != nil { + t.Errorf("Read: unexpected error %v\n", err) + } + if n > 0 { + _, err := outputBuf.Write(buf[:n]) + if err != nil { + t.Errorf("Write: unexpected error %v\n", err) + } + } + } + got := outputBuf.String() + if got != expected { + t.Errorf("Contents = %v, want %v", got, expected) + } +} + +func TestReadGNUSparseMap0x1(t *testing.T) { + headers := map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + } + expected := []sparseEntry{ + {offset: 0, numBytes: 5}, + {offset: 10, numBytes: 5}, + {offset: 20, numBytes: 5}, + {offset: 30, numBytes: 5}, + } + + sp, err := readGNUSparseMap0x1(headers) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !reflect.DeepEqual(sp, expected) { + t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected) + } +} + +func TestReadGNUSparseMap1x0(t *testing.T) { + // This test uses lots of holes so the sparse header takes up more than two blocks + numEntries := 100 + expected := make([]sparseEntry, 0, numEntries) + sparseMap := new(bytes.Buffer) + + fmt.Fprintf(sparseMap, "%d\n", numEntries) + for i := 0; i < numEntries; i++ { + offset := int64(2048 * i) + numBytes := int64(1024) + expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes}) + fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes) + } + + // Make the header the smallest multiple of blockSize that fits the sparseMap + headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize + bufLen := blockSize * headerBlocks + buf := make([]byte, bufLen) + copy(buf, sparseMap.Bytes()) + + // Get a reader to read the sparse map + r := bytes.NewReader(buf) + + // Read the sparse map + sp, err := readGNUSparseMap1x0(r) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !reflect.DeepEqual(sp, expected) { + t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected) + } +} + +func TestUninitializedRead(t *testing.T) { + test := gnuTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + _, err = tr.Read([]byte{}) + if err == nil || err != io.EOF { + t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF) + } + +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go new file mode 100644 index 0000000000..cf9cc79c59 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go @@ -0,0 +1,20
@@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go new file mode 100644 index 0000000000..6f17dbe307 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go new file mode 100644 index 0000000000..cb843db4cf --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go new file mode 100644 index 0000000000..ed333f3ea4 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go @@ -0,0 +1,284 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "io/ioutil" + "os" + "path" + "reflect" + "strings" + "testing" + "time" +) + +func TestFileInfoHeader(t *testing.T) { + fi, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "small.txt"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e { + t.Errorf("Mode = %#o; want %#o", g, e) + } + if g, e := h.Size, int64(5); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } + // FileInfoHeader should error when passing nil FileInfo + if _, err := FileInfoHeader(nil, ""); err == nil { + t.Fatalf("Expected error when passing nil to FileInfoHeader") + } +} + +func TestFileInfoHeaderDir(t *testing.T) { + fi, err := os.Stat("testdata") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "testdata/"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + // Ignoring c_ISGID for golang.org/issue/4867 + if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e { + t.Errorf("Mode = %#o; want %#o", g, e) + } + if g, e := h.Size, int64(0); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } +} + +func TestFileInfoHeaderSymlink(t *testing.T) { + h, err := FileInfoHeader(symlink{}, "some-target") + if err != nil { + t.Fatal(err) + } + if g, e := h.Name, "some-symlink"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Linkname, "some-target"; g != e { + t.Errorf("Linkname = %q; want %q", g, e) + } +} + +type symlink struct{} + +func (symlink) Name() string { return "some-symlink" } +func (symlink) Size() int64 { return 0 } +func (symlink) Mode() os.FileMode { return os.ModeSymlink } +func (symlink) ModTime() time.Time { return time.Time{} } +func (symlink) IsDir() bool { return false } +func (symlink) Sys() interface{} { return nil } + +func TestRoundTrip(t *testing.T) { + data := []byte("some file contents") + + var b bytes.Buffer + tw := NewWriter(&b) + hdr := &Header{ + Name: "file.txt", + Uid: 1 << 21, // too big for 8 octal digits + Size: int64(len(data)), + ModTime: time.Now(), + } + // tar only supports second precision. + hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond) + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("tw.WriteHeader: %v", err) + } + if _, err := tw.Write(data); err != nil { + t.Fatalf("tw.Write: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("tw.Close: %v", err) + } + + // Read it back. + tr := NewReader(&b) + rHdr, err := tr.Next() + if err != nil { + t.Fatalf("tr.Next: %v", err) + } + if !reflect.DeepEqual(rHdr, hdr) { + t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr) + } + rData, err := ioutil.ReadAll(tr) + if err != nil { + t.Fatalf("Read: %v", err) + } + if !bytes.Equal(rData, data) { + t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data) + } +} + +type headerRoundTripTest struct { + h *Header + fm os.FileMode +} + +func TestHeaderRoundTrip(t *testing.T) { + golden := []headerRoundTripTest{ + // regular file. 
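+ // Each golden case pairs a tar Header.Mode, permission bits plus a c_IS*
+ // file-type constant, with the os.FileMode that Header.FileInfo is
+ // expected to report for it; the loop below also feeds that FileInfo back
+ // through FileInfoHeader to check the reverse mapping.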
+ { + h: &Header{ + Name: "test.txt", + Mode: 0644 | c_ISREG, + Size: 12, + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeReg, + }, + fm: 0644, + }, + // hard link. + { + h: &Header{ + Name: "hard.txt", + Mode: 0644 | c_ISLNK, + Size: 0, + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeLink, + }, + fm: 0644 | os.ModeSymlink, + }, + // symbolic link. + { + h: &Header{ + Name: "link.txt", + Mode: 0777 | c_ISLNK, + Size: 0, + ModTime: time.Unix(1360600852, 0), + Typeflag: TypeSymlink, + }, + fm: 0777 | os.ModeSymlink, + }, + // character device node. + { + h: &Header{ + Name: "dev/null", + Mode: 0666 | c_ISCHR, + Size: 0, + ModTime: time.Unix(1360578951, 0), + Typeflag: TypeChar, + }, + fm: 0666 | os.ModeDevice | os.ModeCharDevice, + }, + // block device node. + { + h: &Header{ + Name: "dev/sda", + Mode: 0660 | c_ISBLK, + Size: 0, + ModTime: time.Unix(1360578954, 0), + Typeflag: TypeBlock, + }, + fm: 0660 | os.ModeDevice, + }, + // directory. + { + h: &Header{ + Name: "dir/", + Mode: 0755 | c_ISDIR, + Size: 0, + ModTime: time.Unix(1360601116, 0), + Typeflag: TypeDir, + }, + fm: 0755 | os.ModeDir, + }, + // fifo node. + { + h: &Header{ + Name: "dev/initctl", + Mode: 0600 | c_ISFIFO, + Size: 0, + ModTime: time.Unix(1360578949, 0), + Typeflag: TypeFifo, + }, + fm: 0600 | os.ModeNamedPipe, + }, + // setuid. + { + h: &Header{ + Name: "bin/su", + Mode: 0755 | c_ISREG | c_ISUID, + Size: 23232, + ModTime: time.Unix(1355405093, 0), + Typeflag: TypeReg, + }, + fm: 0755 | os.ModeSetuid, + }, + // setguid. + { + h: &Header{ + Name: "group.txt", + Mode: 0750 | c_ISREG | c_ISGID, + Size: 0, + ModTime: time.Unix(1360602346, 0), + Typeflag: TypeReg, + }, + fm: 0750 | os.ModeSetgid, + }, + // sticky. + { + h: &Header{ + Name: "sticky.txt", + Mode: 0600 | c_ISREG | c_ISVTX, + Size: 7, + ModTime: time.Unix(1360602540, 0), + Typeflag: TypeReg, + }, + fm: 0600 | os.ModeSticky, + }, + } + + for i, g := range golden { + fi := g.h.FileInfo() + h2, err := FileInfoHeader(fi, "") + if err != nil { + t.Error(err) + continue + } + if strings.Contains(fi.Name(), "/") { + t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name()) + } + name := path.Base(g.h.Name) + if fi.IsDir() { + name += "/" + } + if got, want := h2.Name, name; got != want { + t.Errorf("i=%d: Name: got %v, want %v", i, got, want) + } + if got, want := h2.Size, g.h.Size; got != want { + t.Errorf("i=%d: Size: got %v, want %v", i, got, want) + } + if got, want := h2.Mode, g.h.Mode; got != want { + t.Errorf("i=%d: Mode: got %o, want %o", i, got, want) + } + if got, want := fi.Mode(), g.fm; got != want { + t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want) + } + if got, want := h2.ModTime, g.h.ModTime; got != want { + t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want) + } + if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h { + t.Errorf("i=%d: Sys didn't return original *Header", i) + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar new file mode 100644 index 0000000000..fc899dc8dc Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar 
b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar new file mode 100644 index 0000000000..cc9cfaa33c Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar new file mode 100644 index 0000000000..9bc24b6587 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt new file mode 100644 index 0000000000..b249bfc518 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt @@ -0,0 +1 @@ +Kilts \ No newline at end of file diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt new file mode 100644 index 0000000000..394ee3ecd0 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt @@ -0,0 +1 @@ +Google.com diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar new file mode 100644 index 0000000000..8bd4e74d50 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar new file mode 100644 index 0000000000..59e2d4e604 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar new file mode 100644 index 0000000000..29679d9a30 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar new file mode 100644 index 0000000000..eb65fc9410 
Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar new file mode 100644 index 0000000000..5960ee8247 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar new file mode 100644 index 0000000000..753e883ceb Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar new file mode 100644 index 0000000000..e6d816ad07 Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar differ diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar new file mode 100644 index 0000000000..9701950edd Binary files /dev/null and b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar differ diff --git a/tests/_vendor/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go similarity index 100% rename from tests/_vendor/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go rename to tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go new file mode 100644 index 0000000000..5e42e322f9 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go @@ -0,0 +1,491 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" + "testing/iotest" + "time" +) + +type writerTestEntry struct { + header *Header + contents string +} + +type writerTest struct { + file string // filename of expected output + entries []*writerTestEntry +} + +var writerTests = []*writerTest{ + // The writer test file was produced with this command: + // tar (GNU tar) 1.26 + // ln -s small.txt link.txt + // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt + { + file: "testdata/writer.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1246508266, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Kilts", + }, + { + header: &Header{ + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1245217492, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Google.com\n", + }, + { + header: &Header{ + Name: "link.txt", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Size: 0, + ModTime: time.Unix(1314603082, 0), + Typeflag: '2', + Linkname: "small.txt", + Uname: "strings", + Gname: "strings", + }, + // no contents + }, + }, + }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt + // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar + { + file: "testdata/writer-big.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "tmp/16gig.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 16 << 30, + ModTime: time.Unix(1254699560, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt + // tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar + { + file: "testdata/writer-big-long.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "16gig.txt", + Mode: 0644, + Uid: 1000, + Gid: 1000, + Size: 16 << 30, + ModTime: time.Unix(1399583047, 0), + Typeflag: '0', + Uname: "guillaume", + Gname: "guillaume", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, + // This file was produced using gnu tar 1.17 + // gnutar -b 4 --format=ustar (longname/)*15 + file.txt + { + file: "testdata/ustar.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "file.txt", + Mode: 0644, + Uid: 0765, + Gid: 024, + Size: 06, + ModTime: time.Unix(1360135598, 0), + Typeflag: '0', + Uname: "shane", + Gname: "staff", + }, + contents: "hello\n", + }, + }, + }, +} + +// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection. +func bytestr(offset int, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("%04x ", offset) + for _, ch := range b { + switch { + case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z': + s += fmt.Sprintf(" %c", ch) + default: + s += fmt.Sprintf(" %02x", ch) + } + } + return s +} + +// Render a pseudo-diff between two blocks of bytes. +func bytediff(a []byte, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("(%d bytes vs. 
%d bytes)\n", len(a), len(b)) + for offset := 0; len(a)+len(b) > 0; offset += rowLen { + na, nb := rowLen, rowLen + if na > len(a) { + na = len(a) + } + if nb > len(b) { + nb = len(b) + } + sa := bytestr(offset, a[0:na]) + sb := bytestr(offset, b[0:nb]) + if sa != sb { + s += fmt.Sprintf("-%v\n+%v\n", sa, sb) + } + a = a[na:] + b = b[nb:] + } + return s +} + +func TestWriter(t *testing.T) { +testLoop: + for i, test := range writerTests { + expected, err := ioutil.ReadFile(test.file) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + continue + } + + buf := new(bytes.Buffer) + tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB + big := false + for j, entry := range test.entries { + big = big || entry.header.Size > 1<<10 + if err := tw.WriteHeader(entry.header); err != nil { + t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err) + continue testLoop + } + if _, err := io.WriteString(tw, entry.contents); err != nil { + t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err) + continue testLoop + } + } + // Only interested in Close failures for the small tests. + if err := tw.Close(); err != nil && !big { + t.Errorf("test %d: Failed closing archive: %v", i, err) + continue testLoop + } + + actual := buf.Bytes() + if !bytes.Equal(expected, actual) { + t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v", + i, bytediff(expected, actual)) + } + if testing.Short() { // The second test is expensive. + break + } + } +} + +func TestPax(t *testing.T) { + // Create an archive with a large name + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + // Force a PAX long name to be written + longName := strings.Repeat("ab", 100) + contents := strings.Repeat(" ", int(hdr.Size)) + hdr.Name = longName + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long file name") + } +} + +func TestPaxSymlink(t *testing.T) { + // Create an archive with a large linkname + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + hdr.Typeflag = TypeSymlink + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + // Force a PAX long linkname to be written + longLinkname := strings.Repeat("1234567890/1234567890", 10) + hdr.Linkname = longLinkname + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. 
+ reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Linkname != longLinkname { + t.Fatal("Couldn't recover long link name") + } +} + +func TestPaxNonAscii(t *testing.T) { + // Create an archive with non ascii. These should trigger a pax header + // because pax headers have a defined utf-8 encoding. + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + + // some sample data + chineseFilename := "文件名" + chineseGroupname := "組" + chineseUsername := "用戶名" + + hdr.Name = chineseFilename + hdr.Gname = chineseGroupname + hdr.Uname = chineseUsername + + contents := strings.Repeat(" ", int(hdr.Size)) + + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != chineseFilename { + t.Fatal("Couldn't recover unicode name") + } + if hdr.Gname != chineseGroupname { + t.Fatal("Couldn't recover unicode group") + } + if hdr.Uname != chineseUsername { + t.Fatal("Couldn't recover unicode user") + } +} + +func TestPaxXattrs(t *testing.T) { + xattrs := map[string]string{ + "user.key": "value", + } + + // Create an archive with an xattr + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + contents := "Kilts" + hdr.Xattrs = xattrs + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get the xattrs back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(hdr.Xattrs, xattrs) { + t.Fatalf("xattrs did not survive round trip: got %+v, want %+v", + hdr.Xattrs, xattrs) + } +} + +func TestPAXHeader(t *testing.T) { + medName := strings.Repeat("CD", 50) + longName := strings.Repeat("AB", 100) + paxTests := [][2]string{ + {paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"}, + {"a=b", "6 a=b\n"}, // Single digit length + {"a=names", "11 a=names\n"}, // Test case involving carries + {paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)}, + {paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}} + + for _, test := range paxTests { + key, expected := test[0], test[1] + if result := paxHeader(key); result != expected { + t.Fatalf("paxHeader: got %s, expected %s", result, expected) + } + } +} + +func TestUSTARLongName(t *testing.T) { + // Create an archive with a path that failed to split with USTAR extension in previous versions. 
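+ // (ustar splits a long name across a 155-byte prefix field and the
+ // 100-byte name field, and the split must fall on a path separator; the
+ // path below is shaped so that a naive split point lands mid-component.)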
+ fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + hdr.Typeflag = TypeDir + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + // Force a PAX long name to be written. The name was taken from a practical example + // that fails and replaced ever char through numbers to anonymize the sample. + longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/" + hdr.Name = longName + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long name") + } +} + +func TestValidTypeflagWithPAXHeader(t *testing.T) { + var buffer bytes.Buffer + tw := NewWriter(&buffer) + + fileName := strings.Repeat("ab", 100) + + hdr := &Header{ + Name: fileName, + Size: 4, + Typeflag: 0, + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("Failed to write header: %s", err) + } + if _, err := tw.Write([]byte("fooo")); err != nil { + t.Fatalf("Failed to write the file's data: %s", err) + } + tw.Close() + + tr := NewReader(&buffer) + + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("Failed to read header: %s", err) + } + if header.Typeflag != 0 { + t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag) + } + } +} diff --git a/tests/_vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go similarity index 96% rename from tests/_vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go rename to tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go index 567e9a6c16..fe3600597b 100644 --- a/tests/_vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups.go @@ -53,8 +53,3 @@ type Cgroup struct { Freezer FreezerState `json:"freezer,omitempty"` // set the freeze value for the process Slice string `json:"slice,omitempty"` // Parent slice to use for systemd } - -type ActiveCgroup interface { - Cleanup() error - Paths() (map[string]string, error) -} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go new file mode 100644 index 0000000000..e8c52938ce --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/cgroups_test.go @@ -0,0 +1,27 @@ +package cgroups + +import ( + "bytes" + "testing" +) + +const ( + cgroupsContents = `11:hugetlb:/ +10:perf_event:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct,cpu:/ +3:cpuset:/ +2:name=systemd:/user.slice/user-1000.slice/session-16.scope` +) + +func TestParseCgroups(t *testing.T) { + r := bytes.NewBuffer([]byte(cgroupsContents)) + _, err := ParseCgroupFile("blkio", r) + if err != nil { + t.Fatal(err) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go 
b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go new file mode 100644 index 0000000000..6f85793dd2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go @@ -0,0 +1,229 @@ +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +var ( + subsystems = map[string]subsystem{ + "devices": &DevicesGroup{}, + "memory": &MemoryGroup{}, + "cpu": &CpuGroup{}, + "cpuset": &CpusetGroup{}, + "cpuacct": &CpuacctGroup{}, + "blkio": &BlkioGroup{}, + "perf_event": &PerfEventGroup{}, + "freezer": &FreezerGroup{}, + } + CgroupProcesses = "cgroup.procs" +) + +// The absolute path to the root of the cgroup hierarchies. +var cgroupRoot string + +// TODO(vmarmol): Report error here, we'll probably need to wait for the new API. +func init() { + // we can pick any subsystem to find the root + cpuRoot, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return + } + cgroupRoot = filepath.Dir(cpuRoot) + + if _, err := os.Stat(cgroupRoot); err != nil { + return + } +} + +type subsystem interface { + // Returns the stats, as 'stats', corresponding to the cgroup under 'path'. + GetStats(path string, stats *cgroups.Stats) error + // Removes the cgroup represented by 'data'. + Remove(*data) error + // Creates and joins the cgroup represented by data. + Set(*data) error +} + +type data struct { + root string + cgroup string + c *cgroups.Cgroup + pid int +} + +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { + d, err := getCgroupData(c, pid) + if err != nil { + return nil, err + } + + paths := make(map[string]string) + defer func() { + if err != nil { + cgroups.RemovePaths(paths) + } + }() + for name, sys := range subsystems { + if err := sys.Set(d); err != nil { + return nil, err + } + // FIXME: Apply should, ideally, be reentrant or be broken up into a separate + // create and join phase so that the cgroup hierarchy for a container can be + // created then join consists of writing the process pids to cgroup.procs + p, err := d.path(name) + if err != nil { + if cgroups.IsNotFound(err) { + continue + } + return nil, err + } + paths[name] = p + } + return paths, nil +} + +// Symmetrical public function to update device based cgroups. Also available +// in the systemd implementation. 
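+// Unlike Apply, which creates and joins every known subsystem, this touches
+// only the "devices" controller for the given pid.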
+func ApplyDevices(c *cgroups.Cgroup, pid int) error { + d, err := getCgroupData(c, pid) + if err != nil { + return err + } + + devices := subsystems["devices"] + + return devices.Set(d) +} + +func GetStats(systemPaths map[string]string) (*cgroups.Stats, error) { + stats := cgroups.NewStats() + for name, path := range systemPaths { + sys, ok := subsystems[name] + if !ok { + continue + } + if err := sys.GetStats(path, stats); err != nil { + return nil, err + } + } + + return stats, nil +} + +// Freeze toggles the container's freezer cgroup depending on the state +// provided +func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { + d, err := getCgroupData(c, 0) + if err != nil { + return err + } + + c.Freezer = state + + freezer := subsystems["freezer"] + + return freezer.Set(d) +} + +func GetPids(c *cgroups.Cgroup) ([]int, error) { + d, err := getCgroupData(c, 0) + if err != nil { + return nil, err + } + + dir, err := d.path("devices") + if err != nil { + return nil, err + } + + return cgroups.ReadProcsFile(dir) +} + +func getCgroupData(c *cgroups.Cgroup, pid int) (*data, error) { + if cgroupRoot == "" { + return nil, fmt.Errorf("failed to find the cgroup root") + } + + cgroup := c.Name + if c.Parent != "" { + cgroup = filepath.Join(c.Parent, cgroup) + } + + return &data{ + root: cgroupRoot, + cgroup: cgroup, + c: c, + pid: pid, + }, nil +} + +func (raw *data) parent(subsystem string) (string, error) { + initPath, err := cgroups.GetInitCgroupDir(subsystem) + if err != nil { + return "", err + } + return filepath.Join(raw.root, subsystem, initPath), nil +} + +func (raw *data) path(subsystem string) (string, error) { + // If the cgroup name/path is absolute do not look relative to the cgroup of the init process. + if filepath.IsAbs(raw.cgroup) { + path := filepath.Join(raw.root, subsystem, raw.cgroup) + + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return "", cgroups.NewNotFoundError(subsystem) + } + + return "", err + } + + return path, nil + } + + parent, err := raw.parent(subsystem) + if err != nil { + return "", err + } + + return filepath.Join(parent, raw.cgroup), nil +} + +func (raw *data) join(subsystem string) (string, error) { + path, err := raw.path(subsystem) + if err != nil { + return "", err + } + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return "", err + } + if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil { + return "", err + } + return path, nil +} + +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) +} + +func readFile(dir, file string) (string, error) { + data, err := ioutil.ReadFile(filepath.Join(dir, file)) + return string(data), err +} + +func removePath(p string, err error) error { + if err != nil { + return err + } + if p != "" { + return os.RemoveAll(p) + } + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go new file mode 100644 index 0000000000..ce824d56c2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio.go @@ -0,0 +1,187 @@ +package fs + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/libcontainer/cgroups" +) + +type BlkioGroup struct { +} + +func (s *BlkioGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := 
d.join("blkio"); err != nil && !cgroups.IsNotFound(err) { + return err + } + + return nil +} + +func (s *BlkioGroup) Remove(d *data) error { + return removePath(d.path("blkio")) +} + +/* +examples: + + blkio.sectors + 8:0 6792 + + blkio.io_service_bytes + 8:0 Read 1282048 + 8:0 Write 2195456 + 8:0 Sync 2195456 + 8:0 Async 1282048 + 8:0 Total 3477504 + Total 3477504 + + blkio.io_serviced + 8:0 Read 124 + 8:0 Write 104 + 8:0 Sync 104 + 8:0 Async 124 + 8:0 Total 228 + Total 228 + + blkio.io_queued + 8:0 Read 0 + 8:0 Write 0 + 8:0 Sync 0 + 8:0 Async 0 + 8:0 Total 0 + Total 0 +*/ + +func splitBlkioStatLine(r rune) bool { + return r == ' ' || r == ':' +} + +func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) { + var blkioStats []cgroups.BlkioStatEntry + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return blkioStats, nil + } + return nil, err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + // format: dev type amount + fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine) + if len(fields) < 3 { + if len(fields) == 2 && fields[0] == "Total" { + // skip total line + continue + } else { + return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text()) + } + } + + v, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + major := v + + v, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + minor := v + + op := "" + valueField := 2 + if len(fields) == 4 { + op = fields[2] + valueField = 3 + } + v, err = strconv.ParseUint(fields[valueField], 10, 64) + if err != nil { + return nil, err + } + blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v}) + } + + return blkioStats, nil +} + +func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error { + // Try to read CFQ stats available on all CFQ enabled kernels first + if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil { + return getCFQStats(path, stats) + } + return getStats(path, stats) // Use generic stats as fallback +} + +func getCFQStats(path string, stats *cgroups.Stats) error { + var blkioStats []cgroups.BlkioStatEntry + var err error + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil { + return err + } + stats.BlkioStats.SectorsRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServiceBytesRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServicedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil { + return err + } + stats.BlkioStats.IoQueuedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServiceTimeRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoWaitTimeRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil { + return err + } + stats.BlkioStats.IoMergedRecursive = blkioStats + + if blkioStats, err = 
getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoTimeRecursive = blkioStats + + return nil +} + +func getStats(path string, stats *cgroups.Stats) error { + var blkioStats []cgroups.BlkioStatEntry + var err error + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil { + return err + } + stats.BlkioStats.IoServiceBytesRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil { + return err + } + stats.BlkioStats.IoServicedRecursive = blkioStats + + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go new file mode 100644 index 0000000000..6cd38cbaba --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go @@ -0,0 +1,414 @@ +package fs + +import ( + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +const ( + sectorsRecursiveContents = `8:0 1024` + serviceBytesRecursiveContents = `8:0 Read 100 +8:0 Write 200 +8:0 Sync 300 +8:0 Async 500 +8:0 Total 500 +Total 500` + servicedRecursiveContents = `8:0 Read 10 +8:0 Write 40 +8:0 Sync 20 +8:0 Async 30 +8:0 Total 50 +Total 50` + queuedRecursiveContents = `8:0 Read 1 +8:0 Write 4 +8:0 Sync 2 +8:0 Async 3 +8:0 Total 5 +Total 5` + serviceTimeRecursiveContents = `8:0 Read 173959 +8:0 Write 0 +8:0 Sync 0 +8:0 Async 173959 +8:0 Total 17395 +Total 17395` + waitTimeRecursiveContents = `8:0 Read 15571 +8:0 Write 0 +8:0 Sync 0 +8:0 Async 15571 +8:0 Total 15571` + mergedRecursiveContents = `8:0 Read 5 +8:0 Write 10 +8:0 Sync 0 +8:0 Async 0 +8:0 Total 15 +Total 15` + timeRecursiveContents = `8:0 8` + throttleServiceBytes = `8:0 Read 11030528 +8:0 Write 23 +8:0 Sync 42 +8:0 Async 11030528 +8:0 Total 11030528 +252:0 Read 11030528 +252:0 Write 23 +252:0 Sync 42 +252:0 Async 11030528 +252:0 Total 11030528 +Total 22061056` + throttleServiced = `8:0 Read 164 +8:0 Write 23 +8:0 Sync 42 +8:0 Async 164 +8:0 Total 164 +252:0 Read 164 +252:0 Write 23 +252:0 Sync 42 +252:0 Async 164 +252:0 Total 164 +Total 328` +) + +func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) { + *blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op}) +} + +func TestBlkioStats(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + // Verify expected stats. 
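+ // [editor's aside — illustrative sketch, not vendored code. getBlkioStat
+ // tokenizes each stat line with strings.FieldsFunc and splitBlkioStatLine,
+ // so both separators in "8:0 Read 100" count as field breaks:
+ //
+ //	fields := strings.FieldsFunc("8:0 Read 100", splitBlkioStatLine)
+ //	// fields == []string{"8", "0", "Read", "100"}
+ //	// -> major 8, minor 0, op "Read", value 100
+ //
+ // which is exactly the shape the expected entries below assert. end of aside.]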
+ expectedStats := cgroups.BlkioStats{} + appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "") + + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total") + + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total") + + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async") + appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total") + + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 17395, "Total") + + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Read") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Async") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Total") + + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 5, "Read") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 10, "Write") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 15, "Total") + + appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 8, "") + + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) +} + +func TestBlkioStatsNoSectorsFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServiceBytesFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + 
helper.writeFileContents(map[string]string{ + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServicedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoQueuedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServiceTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoWaitTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_merged_recursive": 
mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoMergedFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read 100 100", + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestBlkioStatsUnexpectedFieldType(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read Write", + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := 
blkio.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestNonCFQBlkioStats(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "", + "blkio.io_serviced_recursive": "", + "blkio.io_queued_recursive": "", + "blkio.sectors_recursive": "", + "blkio.io_service_time_recursive": "", + "blkio.io_wait_time_recursive": "", + "blkio.io_merged_recursive": "", + "blkio.time_recursive": "", + "blkio.throttle.io_service_bytes": throttleServiceBytes, + "blkio.throttle.io_serviced": throttleServiced, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + // Verify expected stats. + expectedStats := cgroups.BlkioStats{} + + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Total") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Total") + + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Total") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Read") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 23, "Write") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 42, "Sync") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Async") + appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Total") + + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go new file mode 100644 index 0000000000..efac9ed16a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu.go @@ -0,0 +1,72 @@ +package fs + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type CpuGroup struct { +} + +func (s *CpuGroup) Set(d *data) error { + // We always want to join the cpu group, to allow fair cpu scheduling + // on a container basis + dir, err := d.join("cpu") + if err != nil { + return err + } + if d.c.CpuShares != 0 { + if err := writeFile(dir, "cpu.shares", strconv.FormatInt(d.c.CpuShares, 10)); err != nil { + return err + } + } + if d.c.CpuPeriod != 0 { + if err := writeFile(dir, "cpu.cfs_period_us", 
strconv.FormatInt(d.c.CpuPeriod, 10)); err != nil { + return err + } + } + if d.c.CpuQuota != 0 { + if err := writeFile(dir, "cpu.cfs_quota_us", strconv.FormatInt(d.c.CpuQuota, 10)); err != nil { + return err + } + } + return nil +} + +func (s *CpuGroup) Remove(d *data) error { + return removePath(d.path("cpu")) +} + +func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error { + f, err := os.Open(filepath.Join(path, "cpu.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return err + } + switch t { + case "nr_periods": + stats.CpuStats.ThrottlingData.Periods = v + + case "nr_throttled": + stats.CpuStats.ThrottlingData.ThrottledPeriods = v + + case "throttled_time": + stats.CpuStats.ThrottlingData.ThrottledTime = v + } + } + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go new file mode 100644 index 0000000000..2470e68956 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go @@ -0,0 +1,69 @@ +package fs + +import ( + "fmt" + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +func TestCpuStats(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + const ( + kNrPeriods = 2000 + kNrThrottled = 200 + kThrottledTime = uint64(18446744073709551615) + ) + + cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n", + kNrPeriods, kNrThrottled, kThrottledTime) + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &CpuGroup{} + actualStats := *cgroups.NewStats() + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + + expectedStats := cgroups.ThrottlingData{ + Periods: kNrPeriods, + ThrottledPeriods: kNrThrottled, + ThrottledTime: kThrottledTime} + + expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData) +} + +func TestNoCpuStatFile(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + cpu := &CpuGroup{} + actualStats := *cgroups.NewStats() + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal("Expected not to fail, but did") + } +} + +func TestInvalidCpuStat(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + cpuStatContent := `nr_periods 2000 + nr_throttled 200 + throttled_time fortytwo` + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &CpuGroup{} + actualStats := *cgroups.NewStats() + err := cpu.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failed stat parsing.") + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go new file mode 100644 index 0000000000..14b55ccd4e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go @@ -0,0 +1,110 @@ +package fs + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/system" +) + +const ( + cgroupCpuacctStat = "cpuacct.stat" + nanosecondsInSecond = 1000000000 +) + +var clockTicks = 
uint64(system.GetClockTicks()) + +type CpuacctGroup struct { +} + +func (s *CpuacctGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) { + return err + } + + return nil +} + +func (s *CpuacctGroup) Remove(d *data) error { + return removePath(d.path("cpuacct")) +} + +func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error { + userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path) + if err != nil { + return err + } + + totalUsage, err := getCgroupParamUint(path, "cpuacct.usage") + if err != nil { + return err + } + + percpuUsage, err := getPercpuUsage(path) + if err != nil { + return err + } + + stats.CpuStats.CpuUsage.TotalUsage = totalUsage + stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage + stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage + stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage + return nil +} + +// Returns user and kernel usage breakdown in nanoseconds. +func getCpuUsageBreakdown(path string) (uint64, uint64, error) { + userModeUsage := uint64(0) + kernelModeUsage := uint64(0) + const ( + userField = "user" + systemField = "system" + ) + + // Expected format: + // user + // system + data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat)) + if err != nil { + return 0, 0, err + } + fields := strings.Fields(string(data)) + if len(fields) != 4 { + return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat)) + } + if fields[0] != userField { + return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField) + } + if fields[2] != systemField { + return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField) + } + if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil { + return 0, 0, err + } + if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil { + return 0, 0, err + } + + return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil +} + +func getPercpuUsage(path string) ([]uint64, error) { + percpuUsage := []uint64{} + data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu")) + if err != nil { + return percpuUsage, err + } + for _, value := range strings.Fields(string(data)) { + value, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err) + } + percpuUsage = append(percpuUsage, value) + } + return percpuUsage, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go new file mode 100644 index 0000000000..54d2ed5725 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go @@ -0,0 +1,117 @@ +package fs + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type CpusetGroup struct { +} + +func (s *CpusetGroup) Set(d *data) error { + dir, err := d.path("cpuset") + if err != nil { + return err + } + return s.SetDir(dir, d.c.CpusetCpus, d.pid) +} + +func (s *CpusetGroup) Remove(d *data) error { + return removePath(d.path("cpuset")) +} + +func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} + +func (s 
*CpusetGroup) SetDir(dir, value string, pid int) error { + if err := s.ensureParent(dir); err != nil { + return err + } + + // Because we are not using d.join, we need to place the pid into the procs + // file ourselves, unlike the other subsystems. + if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil { + return err + } + + // If --cpuset was not given, s.ensureParent already copied a default + // cpuset.cpus down from the parent; otherwise, write the requested value. + if value != "" { + if err := writeFile(dir, "cpuset.cpus", value); err != nil { + return err + } + } + + return nil +} + +func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) { + if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil { + return + } + if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil { + return + } + return cpus, mems, nil +} + +// ensureParent recursively creates the directory chain for current, copying +// each directory's cpuset.cpus and cpuset.mems down from its parent when the +// existing files are empty (contain only a newline). +func (s *CpusetGroup) ensureParent(current string) error { + parent := filepath.Dir(current) + + if _, err := os.Stat(parent); err != nil { + if !os.IsNotExist(err) { + return err + } + + if err := s.ensureParent(parent); err != nil { + return err + } + } + + if err := os.MkdirAll(current, 0755); err != nil && !os.IsExist(err) { + return err + } + return s.copyIfNeeded(current, parent) +} + +// copyIfNeeded copies cpuset.cpus and cpuset.mems from the parent directory +// to the current directory if the current directory's files are empty. +func (s *CpusetGroup) copyIfNeeded(current, parent string) error { + var ( + err error + currentCpus, currentMems []byte + parentCpus, parentMems []byte + ) + + if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil { + return err + } + if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil { + return err + } + + if s.isEmpty(currentCpus) { + if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { + return err + } + } + if s.isEmpty(currentMems) { + if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { + return err + } + } + return nil +} + +func (s *CpusetGroup) isEmpty(b []byte) bool { + return len(bytes.Trim(b, "\n")) == 0 +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go new file mode 100644 index 0000000000..98d5d2d7dd --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/devices.go @@ -0,0 +1,34 @@ +package fs + +import "github.com/docker/libcontainer/cgroups" + +type DevicesGroup struct { +} + +func (s *DevicesGroup) Set(d *data) error { + dir, err := d.join("devices") + if err != nil { + return err + } + + if !d.c.AllowAllDevices { + if err := writeFile(dir, "devices.deny", "a"); err != nil { + return err + } + + for _, dev := range d.c.AllowedDevices { + if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return err + } + } + } + return nil +} + +func (s *DevicesGroup) Remove(d *data) error { + return removePath(d.path("devices")) +} + +func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go new 
file mode 100644 index 0000000000..c6b677fa95 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/freezer.go @@ -0,0 +1,50 @@ +package fs + +import ( + "strings" + "time" + + "github.com/docker/libcontainer/cgroups" +) + +type FreezerGroup struct { +} + +func (s *FreezerGroup) Set(d *data) error { + switch d.c.Freezer { + case cgroups.Frozen, cgroups.Thawed: + dir, err := d.path("freezer") + if err != nil { + return err + } + + if err := writeFile(dir, "freezer.state", string(d.c.Freezer)); err != nil { + return err + } + + for { + state, err := readFile(dir, "freezer.state") + if err != nil { + return err + } + if strings.TrimSpace(state) == string(d.c.Freezer) { + break + } + time.Sleep(1 * time.Millisecond) + } + default: + if _, err := d.join("freezer"); err != nil && !cgroups.IsNotFound(err) { + return err + } + } + + return nil +} + +func (s *FreezerGroup) Remove(d *data) error { + return removePath(d.path("freezer")) +} + +func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go new file mode 100644 index 0000000000..3f9647c2fd --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory.go @@ -0,0 +1,93 @@ +package fs + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/docker/libcontainer/cgroups" +) + +type MemoryGroup struct { +} + +func (s *MemoryGroup) Set(d *data) error { + dir, err := d.join("memory") + // only return an error for memory if it was specified + if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) { + return err + } + defer func() { + if err != nil { + os.RemoveAll(dir) + } + }() + + // Only set values if some config was specified. + if d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0 { + if d.c.Memory != 0 { + if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(d.c.Memory, 10)); err != nil { + return err + } + } + if d.c.MemoryReservation != 0 { + if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(d.c.MemoryReservation, 10)); err != nil { + return err + } + } + // By default, MemorySwap is set to twice the size of RAM. + // If you want to omit MemorySwap, set it to `-1'. + if d.c.MemorySwap != -1 { + if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(d.c.Memory*2, 10)); err != nil { + return err + } + } + } + return nil +} + +func (s *MemoryGroup) Remove(d *data) error { + return removePath(d.path("memory")) +} + +func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error { + // Set stats from memory.stat. + statsFile, err := os.Open(filepath.Join(path, "memory.stat")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer statsFile.Close() + + sc := bufio.NewScanner(statsFile) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err) + } + stats.MemoryStats.Stats[t] = v + } + + // Set memory usage and max historical usage. 
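+ // [editor's aside — illustrative sketch, not vendored code. The counter
+ // files read below each hold a single integer plus a trailing newline, so
+ // they go through getCgroupParamUint (utils.go in this package) rather than
+ // the key/value parser used for memory.stat above:
+ //
+ //	// e.g. memory.usage_in_bytes contains "2048\n"
+ //	usage, err := getCgroupParamUint(path, "memory.usage_in_bytes") // 2048
+ //
+ // end of aside.]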
+ value, err := getCgroupParamUint(path, "memory.usage_in_bytes") + if err != nil { + return fmt.Errorf("failed to parse memory.usage_in_bytes - %v", err) + } + stats.MemoryStats.Usage = value + value, err = getCgroupParamUint(path, "memory.max_usage_in_bytes") + if err != nil { + return fmt.Errorf("failed to parse memory.max_usage_in_bytes - %v", err) + } + stats.MemoryStats.MaxUsage = value + value, err = getCgroupParamUint(path, "memory.failcnt") + if err != nil { + return fmt.Errorf("failed to parse memory.failcnt - %v", err) + } + stats.MemoryStats.Failcnt = value + + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go new file mode 100644 index 0000000000..a21cec75c0 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go @@ -0,0 +1,134 @@ +package fs + +import ( + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +const ( + memoryStatContents = `cache 512 +rss 1024` + memoryUsageContents = "2048\n" + memoryMaxUsageContents = "4096\n" + memoryFailcnt = "100\n" +) + +func TestMemoryStats(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + "memory.failcnt": memoryFailcnt, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } + expectedStats := cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}} + expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats) +} + +func TestMemoryStatsNoStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatal(err) + } +} + +func TestMemoryStatsNoUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsNoMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": "rss rss", + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := 
*cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": "bad", + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": "bad", + }) + + memory := &MemoryGroup{} + actualStats := *cgroups.NewStats() + err := memory.GetStats(helper.CgroupPath, &actualStats) + if err == nil { + t.Fatal("Expected failure") + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go new file mode 100644 index 0000000000..d92063bade --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go @@ -0,0 +1,82 @@ +// +build linux + +package fs + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/docker/libcontainer/cgroups" +) + +// NotifyOnOOM sends signals on the returned channel when the cgroup reaches +// its memory limit. The channel is closed when the cgroup is removed. +func NotifyOnOOM(c *cgroups.Cgroup) (<-chan struct{}, error) { + d, err := getCgroupData(c, 0) + if err != nil { + return nil, err + } + + return notifyOnOOM(d) +} + +func notifyOnOOM(d *data) (<-chan struct{}, error) { + dir, err := d.path("memory") + if err != nil { + return nil, err + } + + fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) + if syserr != 0 { + return nil, syserr + } + + eventfd := os.NewFile(fd, "eventfd") + + oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control")) + if err != nil { + eventfd.Close() + return nil, err + } + + var ( + eventControlPath = filepath.Join(dir, "cgroup.event_control") + data = fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd()) + ) + + if err := writeFile(dir, "cgroup.event_control", data); err != nil { + eventfd.Close() + oomControl.Close() + return nil, err + } + + ch := make(chan struct{}) + + go func() { + defer func() { + close(ch) + eventfd.Close() + oomControl.Close() + }() + + buf := make([]byte, 8) + + for { + if _, err := eventfd.Read(buf); err != nil { + return + } + + // When a cgroup is destroyed, an event is sent to eventfd. + // So if the control path is gone, return instead of notifying. 
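+ // [editor's aside — illustrative sketch, not vendored code. The pair
+ // written earlier to cgroup.event_control is "<eventfd> <oom_control fd>",
+ // after which the kernel posts to the eventfd on every OOM event. A
+ // consumer of NotifyOnOOM might look like:
+ //
+ //	ooms, err := fs.NotifyOnOOM(cgroup)
+ //	if err != nil {
+ //		return err
+ //	}
+ //	for range ooms { // channel is closed when the cgroup is removed
+ //		log.Println("container hit its memory limit")
+ //	}
+ //
+ // end of aside.]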
+ if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { + return + } + + ch <- struct{}{} + } + }() + + return ch, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go new file mode 100644 index 0000000000..a11880cb66 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go @@ -0,0 +1,86 @@ +// +build linux + +package fs + +import ( + "encoding/binary" + "fmt" + "syscall" + "testing" + "time" +) + +func TestNotifyOnOOM(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + + helper.writeFileContents(map[string]string{ + "memory.oom_control": "", + "cgroup.event_control": "", + }) + + var eventFd, oomControlFd int + + ooms, err := notifyOnOOM(helper.CgroupData) + if err != nil { + t.Fatal("expected no error, got:", err) + } + + memoryPath, _ := helper.CgroupData.path("memory") + data, err := readFile(memoryPath, "cgroup.event_control") + if err != nil { + t.Fatal("couldn't read event control file:", err) + } + + if _, err := fmt.Sscanf(data, "%d %d", &eventFd, &oomControlFd); err != nil { + t.Fatalf("invalid control data %q: %s", data, err) + } + + // re-open the eventfd + efd, err := syscall.Dup(eventFd) + if err != nil { + t.Fatal("unable to reopen eventfd:", err) + } + defer syscall.Close(efd) + + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, 1) + + if _, err := syscall.Write(efd, buf); err != nil { + t.Fatal("unable to write to eventfd:", err) + } + + select { + case <-ooms: + case <-time.After(100 * time.Millisecond): + t.Fatal("no notification on oom channel after 100ms") + } + + // simulate what happens when a cgroup is destroyed by cleaning up and then + // writing to the eventfd. 
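+ // [editor's aside — not vendored code. The select below relies on the
+ // two-value receive form to tell a real notification apart from channel
+ // shutdown:
+ //
+ //	v, ok := <-ooms // ok == false once the notify goroutine has exited
+ //
+ // so a true "ok" here would mean a spurious OOM event after cleanup.
+ // end of aside.]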
+ helper.cleanup() + if _, err := syscall.Write(efd, buf); err != nil { + t.Fatal("unable to write to eventfd:", err) + } + + // give things a moment to shut down + select { + case _, ok := <-ooms: + if ok { + t.Fatal("expected no oom to be triggered") + } + case <-time.After(100 * time.Millisecond): + } + + if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(oomControlFd), syscall.F_GETFD, 0); err != syscall.EBADF { + t.Error("expected oom control to be closed") + } + + if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF { + t.Error("expected event fd to be closed") + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go new file mode 100644 index 0000000000..813274d8cb --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go @@ -0,0 +1,24 @@ +package fs + +import ( + "github.com/docker/libcontainer/cgroups" +) + +type PerfEventGroup struct { +} + +func (s *PerfEventGroup) Set(d *data) error { + // we just want to join this group even though we don't set anything + if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) { + return err + } + return nil +} + +func (s *PerfEventGroup) Remove(d *data) error { + return removePath(d.path("perf_event")) +} + +func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error { + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go new file mode 100644 index 0000000000..1a9e590f59 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go @@ -0,0 +1,93 @@ +package fs + +import ( + "fmt" + "log" + "testing" + + "github.com/docker/libcontainer/cgroups" +) + +func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error { + if len(expected) != len(actual) { + return fmt.Errorf("blkioStatEntries length do not match") + } + for i, expValue := range expected { + actValue := actual[i] + if expValue != actValue { + return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue) + } + } + return nil +} + +func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) { + if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil { + log.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil { + log.Printf("blkio IoServicedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil { + log.Printf("blkio IoQueuedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil { + log.Printf("blkio SectorsRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil { + log.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil { + log.Printf("blkio 
IoWaitTimeRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil { + log.Printf("blkio IoMergedRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil { + log.Printf("blkio IoTimeRecursive do not match - %s\n", err) + t.Fail() + } +} + +func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) { + if expected != actual { + log.Printf("Expected throttling data %v but found %v\n", expected, actual) + t.Fail() + } +} + +func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) { + if expected.Usage != actual.Usage { + log.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage) + t.Fail() + } + if expected.MaxUsage != actual.MaxUsage { + log.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage) + t.Fail() + } + for key, expValue := range expected.Stats { + actValue, ok := actual.Stats[key] + if !ok { + log.Printf("Expected memory stat key %s not found\n", key) + t.Fail() + } + if expValue != actValue { + log.Printf("Expected memory stat value %d but found %d\n", expValue, actValue) + t.Fail() + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/util_test.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/util_test.go new file mode 100644 index 0000000000..548870a8a3 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/util_test.go @@ -0,0 +1,60 @@ +/* +Utility for testing cgroup operations. + +Creates a mock of the cgroup filesystem for the duration of the test. +*/ +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "testing" +) + +type cgroupTestUtil struct { + // data to use in tests. + CgroupData *data + + // Path to the mock cgroup directory. + CgroupPath string + + // Temporary directory to store mock cgroup filesystem. + tempDir string + t *testing.T +} + +// Creates a new test util for the specified subsystem. +func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil { + d := &data{} + tempDir, err := ioutil.TempDir("", fmt.Sprintf("%s_cgroup_test", subsystem)) + if err != nil { + t.Fatal(err) + } + d.root = tempDir + testCgroupPath, err := d.path(subsystem) + if err != nil { + t.Fatal(err) + } + + // Ensure the full mock cgroup path exists. + err = os.MkdirAll(testCgroupPath, 0755) + if err != nil { + t.Fatal(err) + } + return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t} +} + +func (c *cgroupTestUtil) cleanup() { + os.RemoveAll(c.tempDir) +} + +// Write the specified contents to the mocks of the specified cgroup files. 
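+//
+// [editor's aside — illustrative sketch, not vendored code. A typical test in
+// this package pairs the helper with a subsystem's GetStats, mirroring
+// memory_test.go:
+//
+//	helper := NewCgroupTestUtil("memory", t)
+//	defer helper.cleanup()
+//	helper.writeFileContents(map[string]string{"memory.usage_in_bytes": "2048\n"})
+//	stats := *cgroups.NewStats()
+//	err := (&MemoryGroup{}).GetStats(helper.CgroupPath, &stats)
+//
+// end of aside.]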
+func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) { + for file, contents := range fileContents { + err := writeFile(c.CgroupPath, file, contents) + if err != nil { + c.t.Fatal(err) + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go new file mode 100644 index 0000000000..f37a3a485a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils.go @@ -0,0 +1,62 @@ +package fs + +import ( + "errors" + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +var ( + ErrNotSupportStat = errors.New("stats are not supported for subsystem") + ErrNotValidFormat = errors.New("line is not a valid key value format") +) + +// Saturates negative values at zero and returns a uint64. +// Due to kernel bugs, some of the memory cgroup stats can be negative. +func parseUint(s string, base, bitSize int) (uint64, error) { + value, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values that fit in an int64 (ParseInt succeeds), and + // 2. Handle negative values below MinInt64 (ParseInt overflows with ErrRange). + if intErr == nil && intValue < 0 { + return 0, nil + } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 { + return 0, nil + } + + return value, err + } + + return value, nil +} + +// Parses a cgroup param line and returns its name and value, +// e.g. "io_service_bytes 1234" returns ("io_service_bytes", 1234). +func getCgroupParamKeyValue(t string) (string, uint64, error) { + parts := strings.Fields(t) + switch len(parts) { + case 2: + value, err := parseUint(parts[1], 10, 64) + if err != nil { + return "", 0, fmt.Errorf("Unable to convert param value (%q) to uint64: %v", parts[1], err) + } + + return parts[0], value, nil + default: + return "", 0, ErrNotValidFormat + } +} + +// Gets a single uint64 value from the specified cgroup file. +func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) { + contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) + if err != nil { + return 0, err + } + + return parseUint(strings.TrimSpace(string(contents)), 10, 64) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go new file mode 100644 index 0000000000..8b19a84b27 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go @@ -0,0 +1,95 @@ +package fs + +import ( + "io/ioutil" + "math" + "os" + "path/filepath" + "strconv" + "testing" +) + +const ( + cgroupFile = "cgroup.file" + floatValue = 2048.0 + floatString = "2048" +) + +func TestGetCgroupParamsInt(t *testing.T) { + // Setup tempdir. + tempDir, err := ioutil.TempDir("", "cgroup_utils_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, cgroupFile) + + // Success. + err = ioutil.WriteFile(tempFile, []byte(floatString), 0755) + if err != nil { + t.Fatal(err) + } + value, err := getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %d to equal %f", value, floatValue) + } + + // Success with new line. 
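+ // [editor's aside — not vendored code. getCgroupParamUint trims the string
+ // before handing it to parseUint, which is why this newline case must
+ // behave exactly like the bare-number case above:
+ //
+ //	v, err := parseUint(strings.TrimSpace("2048\n"), 10, 64) // 2048, nil
+ //
+ // end of aside.]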
+ err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %d to equal %f", value, floatValue) + } + + // Success with negative values + err = ioutil.WriteFile(tempFile, []byte("-12345"), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != 0 { + t.Fatalf("Expected %d to equal %d", value, 0) + } + + // Success with negative values less than MinInt64 + s := strconv.FormatFloat(math.MinInt64, 'f', -1, 64) + err = ioutil.WriteFile(tempFile, []byte(s), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamUint(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != 0 { + t.Fatalf("Expected %d to equal %d", value, 0) + } + + // Not a float. + err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamUint(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } + + // Unknown file. + err = os.Remove(tempFile) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamUint(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go new file mode 100644 index 0000000000..dc5dbb3c21 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/stats.go @@ -0,0 +1,73 @@ +package cgroups + +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods,omitempty"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time,omitempty"` +} + +// All CPU stats are aggregate since container inception. +type CpuUsage struct { + // Total CPU time consumed. + // Units: nanoseconds. + TotalUsage uint64 `json:"total_usage,omitempty"` + // Total CPU time consumed per core. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + // Time spent by tasks of the cgroup in kernel mode. + // Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. + // Units: nanoseconds. + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +type CpuStats struct { + CpuUsage CpuUsage `json:"cpu_usage,omitempty"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +type MemoryStats struct { + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. 
+	Failcnt uint64 `json:"failcnt"`
+}
+
+type BlkioStatEntry struct {
+	Major uint64 `json:"major,omitempty"`
+	Minor uint64 `json:"minor,omitempty"`
+	Op    string `json:"op,omitempty"`
+	Value uint64 `json:"value,omitempty"`
+}
+
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
+	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive,omitempty"`
+	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
+	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive,omitempty"`
+	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"`
+	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive,omitempty"`
+	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive,omitempty"`
+	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive,omitempty"`
+}
+
+type Stats struct {
+	CpuStats    CpuStats    `json:"cpu_stats,omitempty"`
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
+}
+
+func NewStats() *Stats {
+	memoryStats := MemoryStats{Stats: make(map[string]uint64)}
+	return &Stats{MemoryStats: memoryStats}
+}
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
new file mode 100644
index 0000000000..4b9a2f5b74
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
@@ -0,0 +1,29 @@
+// +build !linux
+
+package systemd
+
+import (
+	"fmt"
+
+	"github.com/docker/libcontainer/cgroups"
+)
+
+func UseSystemd() bool {
+	return false
+}
+
+func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) {
+	return nil, fmt.Errorf("Systemd not supported")
+}
+
+func GetPids(c *cgroups.Cgroup) ([]int, error) {
+	return nil, fmt.Errorf("Systemd not supported")
+}
+
+func ApplyDevices(c *cgroups.Cgroup, pid int) error {
+	return fmt.Errorf("Systemd not supported")
+}
+
+func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
+	return fmt.Errorf("Systemd not supported")
+}
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
new file mode 100644
index 0000000000..3d89811433
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
@@ -0,0 +1,317 @@
+// +build linux
+
+package systemd
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	systemd "github.com/coreos/go-systemd/dbus"
+	"github.com/docker/libcontainer/cgroups"
+	"github.com/docker/libcontainer/cgroups/fs"
+	"github.com/godbus/dbus"
+)
+
+type systemdCgroup struct {
+	cgroup *cgroups.Cgroup
+}
+
+type subsystem interface {
+	GetStats(string, *cgroups.Stats) error
+}
+
+var (
+	connLock              sync.Mutex
+	theConn               *systemd.Conn
+	hasStartTransientUnit bool
+)
+
+func newProp(name string, units interface{}) systemd.Property {
+	return systemd.Property{
+		Name:  name,
+		Value: dbus.MakeVariant(units),
+	}
+}
+
+func UseSystemd() bool {
+	s, err := os.Stat("/run/systemd/system")
+	if err != nil || !s.IsDir() {
+		return false
+	}
+
+	connLock.Lock()
+	defer connLock.Unlock()
+
+	if theConn == nil {
+		var err error
+		theConn, err =
systemd.New() + if err != nil { + return false + } + + // Assume we have StartTransientUnit + hasStartTransientUnit = true + + // But if we get UnknownMethod error we don't + if _, err := theConn.StartTransientUnit("test.scope", "invalid"); err != nil { + if dbusError, ok := err.(dbus.Error); ok { + if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" { + hasStartTransientUnit = false + } + } + } + } + return hasStartTransientUnit +} + +func getIfaceForUnit(unitName string) string { + if strings.HasSuffix(unitName, ".scope") { + return "Scope" + } + if strings.HasSuffix(unitName, ".service") { + return "Service" + } + return "Unit" +} + +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { + var ( + unitName = getUnitName(c) + slice = "system.slice" + properties []systemd.Property + res = &systemdCgroup{} + ) + + res.cgroup = c + + if c.Slice != "" { + slice = c.Slice + } + + properties = append(properties, + systemd.PropSlice(slice), + systemd.PropDescription("docker container "+c.Name), + newProp("PIDs", []uint32{uint32(pid)}), + ) + + // Always enable accounting, this gets us the same behaviour as the fs implementation, + // plus the kernel has some problems with joining the memory cgroup at a later time. + properties = append(properties, + newProp("MemoryAccounting", true), + newProp("CPUAccounting", true), + newProp("BlockIOAccounting", true)) + + if c.Memory != 0 { + properties = append(properties, + newProp("MemoryLimit", uint64(c.Memory))) + } + // TODO: MemoryReservation and MemorySwap not available in systemd + + if c.CpuShares != 0 { + properties = append(properties, + newProp("CPUShares", uint64(c.CpuShares))) + } + + if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil { + return nil, err + } + + if !c.AllowAllDevices { + if err := joinDevices(c, pid); err != nil { + return nil, err + } + } + + // -1 disables memorySwap + if c.MemorySwap >= 0 && (c.Memory != 0 || c.MemorySwap > 0) { + if err := joinMemory(c, pid); err != nil { + return nil, err + } + + } + + // we need to manually join the freezer and cpuset cgroup in systemd + // because it does not currently support it via the dbus api. 
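+	// (Editor's sketch: "joining" a cgroup simply means writing the pid
+	// into the subsystem's cgroup.procs file, e.g.
+	//
+	//	ioutil.WriteFile(filepath.Join(path, "cgroup.procs"),
+	//		[]byte(strconv.Itoa(pid)), 0700)
+	//
+	// which is exactly what joinFreezer does below; joinCpuset delegates
+	// the same job to the fs backend's CpusetGroup.SetDir.)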
+	if err := joinFreezer(c, pid); err != nil {
+		return nil, err
+	}
+
+	if err := joinCpuset(c, pid); err != nil {
+		return nil, err
+	}
+
+	paths := make(map[string]string)
+	for _, sysname := range []string{
+		"devices",
+		"memory",
+		"cpu",
+		"cpuset",
+		"cpuacct",
+		"blkio",
+		"perf_event",
+		"freezer",
+	} {
+		subsystemPath, err := getSubsystemPath(res.cgroup, sysname)
+		if err != nil {
+			// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
+			if cgroups.IsNotFound(err) {
+				continue
+			}
+			return nil, err
+		}
+		paths[sysname] = subsystemPath
+	}
+	return paths, nil
+}
+
+func writeFile(dir, file, data string) error {
+	return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
+}
+
+func joinFreezer(c *cgroups.Cgroup, pid int) error {
+	path, err := getSubsystemPath(c, "freezer")
+	if err != nil {
+		return err
+	}
+
+	if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	return ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700)
+}
+
+func getSubsystemPath(c *cgroups.Cgroup, subsystem string) (string, error) {
+	mountpoint, err := cgroups.FindCgroupMountpoint(subsystem)
+	if err != nil {
+		return "", err
+	}
+
+	initPath, err := cgroups.GetInitCgroupDir(subsystem)
+	if err != nil {
+		return "", err
+	}
+
+	slice := "system.slice"
+	if c.Slice != "" {
+		slice = c.Slice
+	}
+
+	return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil
+}
+
+func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
+	path, err := getSubsystemPath(c, "freezer")
+	if err != nil {
+		return err
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(path, "freezer.state"), []byte(state), 0); err != nil {
+		return err
+	}
+	for {
+		state_, err := ioutil.ReadFile(filepath.Join(path, "freezer.state"))
+		if err != nil {
+			return err
+		}
+		if string(state) == string(bytes.TrimSpace(state_)) {
+			break
+		}
+		time.Sleep(1 * time.Millisecond)
+	}
+	return nil
+}
+
+func GetPids(c *cgroups.Cgroup) ([]int, error) {
+	path, err := getSubsystemPath(c, "cpu")
+	if err != nil {
+		return nil, err
+	}
+
+	return cgroups.ReadProcsFile(path)
+}
+
+func getUnitName(c *cgroups.Cgroup) string {
+	return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name)
+}
+
+// At the moment we can't use the systemd device support because of two missing things:
+// * Support for wildcards to allow mknod on any device
+// * Support for wildcards to allow /dev/pts support
+//
+// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is
+// in wide use. When both of these are available we will be able to switch, but need to keep the old
+// implementation for backwards compat.
+//
+// Note: we can't use systemd to set up the initial limits, and then change the cgroup
+// because systemd will re-write the device settings if it needs to re-apply the cgroup context.
+// This happens at least for v208 when any sibling unit is started.
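+// The manual fallback below mirrors the fs backend: move the pid into the
+// devices cgroup, write "a" to devices.deny to blacklist every device, then
+// re-allow each entry in c.AllowedDevices through devices.allow.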
+func joinDevices(c *cgroups.Cgroup, pid int) error { + path, err := getSubsystemPath(c, "devices") + if err != nil { + return err + } + + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + + if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil { + return err + } + + if err := writeFile(path, "devices.deny", "a"); err != nil { + return err + } + + for _, dev := range c.AllowedDevices { + if err := writeFile(path, "devices.allow", dev.GetCgroupAllowString()); err != nil { + return err + } + } + + return nil +} + +// Symmetrical public function to update device based cgroups. Also available +// in the fs implementation. +func ApplyDevices(c *cgroups.Cgroup, pid int) error { + return joinDevices(c, pid) +} + +func joinMemory(c *cgroups.Cgroup, pid int) error { + memorySwap := c.MemorySwap + + if memorySwap == 0 { + // By default, MemorySwap is set to twice the size of RAM. + memorySwap = c.Memory * 2 + } + + path, err := getSubsystemPath(c, "memory") + if err != nil { + return err + } + + return ioutil.WriteFile(filepath.Join(path, "memory.memsw.limit_in_bytes"), []byte(strconv.FormatInt(memorySwap, 10)), 0700) +} + +// systemd does not atm set up the cpuset controller, so we must manually +// join it. Additionally that is a very finicky controller where each +// level must have a full setup as the default for a new directory is "no cpus" +func joinCpuset(c *cgroups.Cgroup, pid int) error { + path, err := getSubsystemPath(c, "cpuset") + if err != nil { + return err + } + + s := &fs.CpusetGroup{} + + return s.SetDir(path, c.CpusetCpus, pid) +} diff --git a/tests/_vendor/src/github.com/docker/libcontainer/cgroups/utils.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go similarity index 91% rename from tests/_vendor/src/github.com/docker/libcontainer/cgroups/utils.go rename to tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go index 77a3c0d7c5..224a20b9b2 100644 --- a/tests/_vendor/src/github.com/docker/libcontainer/cgroups/utils.go +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/cgroups/utils.go @@ -189,6 +189,17 @@ func EnterPid(cgroupPaths map[string]string, pid int) error { } } } - return nil } + +// RemovePaths iterates over the provided paths removing them. +// If an error is encountered the removal proceeds and the first error is +// returned to ensure a partial removal is not possible. +func RemovePaths(paths map[string]string) (err error) { + for _, path := range paths { + if rerr := os.RemoveAll(path); err == nil { + err = rerr + } + } + return err +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go new file mode 100644 index 0000000000..e0ad0b08f8 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/defaults.go @@ -0,0 +1,159 @@ +package devices + +var ( + // These are devices that are to be both allowed and created. 
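+	// (Each Device below corresponds to one line of the devices cgroup
+	// whitelist, formatted as "<type> <major>:<minor> <permissions>";
+	// for example, the /dev/null entry becomes "c 1:3 rwm": character
+	// device, major 1, minor 3, read/write/mknod.)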
+ + DefaultSimpleDevices = []*Device{ + // /dev/null and zero + { + Path: "/dev/null", + Type: 'c', + MajorNumber: 1, + MinorNumber: 3, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/zero", + Type: 'c', + MajorNumber: 1, + MinorNumber: 5, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + { + Path: "/dev/full", + Type: 'c', + MajorNumber: 1, + MinorNumber: 7, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // consoles and ttys + { + Path: "/dev/tty", + Type: 'c', + MajorNumber: 5, + MinorNumber: 0, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + + // /dev/urandom,/dev/random + { + Path: "/dev/urandom", + Type: 'c', + MajorNumber: 1, + MinorNumber: 9, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/random", + Type: 'c', + MajorNumber: 1, + MinorNumber: 8, + CgroupPermissions: "rwm", + FileMode: 0666, + }, + } + + DefaultAllowedDevices = append([]*Device{ + // allow mknod for any device + { + Type: 'c', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + { + Type: 'b', + MajorNumber: Wildcard, + MinorNumber: Wildcard, + CgroupPermissions: "m", + }, + + { + Path: "/dev/console", + Type: 'c', + MajorNumber: 5, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty0", + Type: 'c', + MajorNumber: 4, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + { + Path: "/dev/tty1", + Type: 'c', + MajorNumber: 4, + MinorNumber: 1, + CgroupPermissions: "rwm", + }, + // /dev/pts/ - pts namespaces are "coming soon" + { + Path: "", + Type: 'c', + MajorNumber: 136, + MinorNumber: Wildcard, + CgroupPermissions: "rwm", + }, + { + Path: "", + Type: 'c', + MajorNumber: 5, + MinorNumber: 2, + CgroupPermissions: "rwm", + }, + + // tuntap + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 200, + CgroupPermissions: "rwm", + }, + + /*// fuse + { + Path: "", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + + // rtc + { + Path: "", + Type: 'c', + MajorNumber: 254, + MinorNumber: 0, + CgroupPermissions: "rwm", + }, + */ + }, DefaultSimpleDevices...) + + DefaultAutoCreatedDevices = append([]*Device{ + { + // /dev/fuse is created but not allowed. + // This is to allow java to work. Because java + // Insists on there being a /dev/fuse + // https://github.com/docker/docker/issues/514 + // https://github.com/docker/docker/issues/2393 + // + Path: "/dev/fuse", + Type: 'c', + MajorNumber: 10, + MinorNumber: 229, + CgroupPermissions: "rwm", + }, + }, DefaultSimpleDevices...) 
+) diff --git a/tests/_vendor/src/github.com/docker/libcontainer/devices/devices.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go similarity index 98% rename from tests/_vendor/src/github.com/docker/libcontainer/devices/devices.go rename to tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go index 5bf80e8cd4..8e86d95292 100644 --- a/tests/_vendor/src/github.com/docker/libcontainer/devices/devices.go +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices.go @@ -103,7 +103,7 @@ func getDeviceNodes(path string) ([]*Device, error) { switch { case f.IsDir(): switch f.Name() { - case "pts", "shm", "fd": + case "pts", "shm", "fd", "mqueue": continue default: sub, err := getDeviceNodes(filepath.Join(path, f.Name())) diff --git a/tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices_test.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices_test.go new file mode 100644 index 0000000000..fec4002237 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/devices_test.go @@ -0,0 +1,61 @@ +package devices + +import ( + "errors" + "os" + "testing" +) + +func TestGetDeviceLstatFailure(t *testing.T) { + testError := errors.New("test error") + + // Override os.Lstat to inject error. + osLstat = func(path string) (os.FileInfo, error) { + return nil, testError + } + + _, err := GetDevice("", "") + if err != testError { + t.Fatalf("Unexpected error %v, expected %v", err, testError) + } +} + +func TestGetHostDeviceNodesIoutilReadDirFailure(t *testing.T) { + testError := errors.New("test error") + + // Override ioutil.ReadDir to inject error. + ioutilReadDir = func(dirname string) ([]os.FileInfo, error) { + return nil, testError + } + + _, err := GetHostDeviceNodes() + if err != testError { + t.Fatalf("Unexpected error %v, expected %v", err, testError) + } +} + +func TestGetHostDeviceNodesIoutilReadDirDeepFailure(t *testing.T) { + testError := errors.New("test error") + called := false + + // Override ioutil.ReadDir to inject error after the first call. + ioutilReadDir = func(dirname string) ([]os.FileInfo, error) { + if called { + return nil, testError + } + called = true + + // Provoke a second call. + fi, err := os.Lstat("/tmp") + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + + return []os.FileInfo{fi}, nil + } + + _, err := GetHostDeviceNodes() + if err != testError { + t.Fatalf("Unexpected error %v, expected %v", err, testError) + } +} diff --git a/tests/_vendor/src/github.com/docker/libcontainer/devices/number.go b/tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go similarity index 100% rename from tests/_vendor/src/github.com/docker/libcontainer/devices/number.go rename to tests/Godeps/_workspace/src/github.com/docker/libcontainer/devices/number.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md b/tests/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 0000000000..05be0f8ab3 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. 
+
+Otherwise, go read
+[Docker's contribution guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/tests/_vendor/src/github.com/docker/docker/LICENSE b/tests/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE
similarity index 100%
rename from tests/_vendor/src/github.com/docker/docker/LICENSE
rename to tests/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS b/tests/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS
new file mode 100644
index 0000000000..9768175feb
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS
@@ -0,0 +1,3 @@
+Solomon Hykes
+Josh Hawn (github: jlhawn)
+Derek McGowan (github: dmcgowan)
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/README.md b/tests/Godeps/_workspace/src/github.com/docker/libtrust/README.md
new file mode 100644
index 0000000000..8e7db38186
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/README.md
@@ -0,0 +1,18 @@
+# libtrust
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control is managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go
new file mode 100644
index 0000000000..3dcca33cb1
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"time"
+)
+
+type certTemplateInfo struct {
+	commonName  string
+	domains     []string
+	ipAddresses []net.IP
+	isCA        bool
+	clientAuth  bool
+	serverAuth  bool
+}
+
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+	// Generate a certificate template which is valid from the past week to
+	// 10 years from now. The usage of the certificate depends on the
+	// specified fields in the given certTemplateInfo object.
+ var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
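+//
+// A minimal usage sketch (the bundle path is hypothetical):
+//
+//	certs, err := LoadCertificateBundle("/etc/docker/ca-bundle.pem")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("loaded %d certificates\n", len(certs))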
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go new file mode 100644 index 0000000000..c111f3531a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go @@ -0,0 +1,111 @@ +package libtrust + +import ( + "encoding/pem" + "io/ioutil" + "net" + "os" + "path" + "testing" +) + +func TestGenerateCertificates(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}) + if err != nil { + t.Fatal(err) + } + + _, err = GenerateSelfSignedClientCert(key) + if err != nil { + t.Fatal(err) + } +} + +func TestGenerateCACertPool(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()}) + if err != nil { + t.Fatal(err) + } +} + +func TestLoadCertificates(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + caKey2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cert1, err := GenerateCACert(caKey1, key) + if err != nil { + t.Fatal(err) + } + cert2, err := GenerateCACert(caKey2, key) + if err != nil { + t.Fatal(err) + } + + d, err := ioutil.TempDir("/tmp", "cert-test") + if err != nil { + t.Fatal(err) + } + caFile := path.Join(d, "ca.pem") + f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatal(err) + } + + err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert1.Raw}) + if err != nil { + t.Fatal(err) + } + err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw}) + if err != nil { + t.Fatal(err) + } + f.Close() + + certs, err := LoadCertificateBundle(caFile) + if err != nil { + t.Fatal(err) + } + if len(certs) != 2 { + t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs)) + } + + pool, err := LoadCertificatePool(caFile) + if err != nil { + t.Fatal(err) + } + + if len(pool.Subjects()) != 2 { + t.Fatalf("Invalid certificate pool") + } +} diff --git 
a/tests/Godeps/_workspace/src/github.com/docker/libtrust/doc.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/doc.go new file mode 100644 index 0000000000..ec5d2159c1 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/tests/_vendor/src/github.com/docker/libtrust/ec_key.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go similarity index 96% rename from tests/_vendor/src/github.com/docker/libtrust/ec_key.go rename to tests/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go index c7ac6844cf..00bbe4b3ca 100644 --- a/tests/_vendor/src/github.com/docker/libtrust/ec_key.go +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go @@ -55,16 +55,7 @@ func (k *ecPublicKey) CurveName() string { // KeyID returns a distinct identifier which is unique to this Public Key. func (k *ecPublicKey) KeyID() string { - // Generate and return a libtrust fingerprint of the EC public key. - // For an EC key this should be: - // SHA256("EC"+curveName+bytes(X)+bytes(Y)) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - hasher := crypto.SHA256.New() - hasher.Write([]byte(k.KeyType() + k.CurveName())) - hasher.Write(k.X.Bytes()) - hasher.Write(k.Y.Bytes()) - return keyIDEncode(hasher.Sum(nil)[:30]) + return keyIDFromCryptoKey(k) } func (k *ecPublicKey) String() string { @@ -151,7 +142,7 @@ func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { if err != nil { return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) } - k.extended["keyID"] = k.KeyID() // For display purposes. + k.extended["kid"] = k.KeyID() // For display purposes. return createPemBlock("PUBLIC KEY", derBytes, k.extended) } diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go new file mode 100644 index 0000000000..26ac381497 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go @@ -0,0 +1,157 @@ +package libtrust + +import ( + "bytes" + "encoding/json" + "testing" +) + +func generateECTestKeys(t *testing.T) []PrivateKey { + p256Key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + p384Key, err := GenerateECP384PrivateKey() + if err != nil { + t.Fatal(err) + } + + p521Key, err := GenerateECP521PrivateKey() + if err != nil { + t.Fatal(err) + } + + return []PrivateKey{p256Key, p384Key, p521Key} +} + +func TestECKeys(t *testing.T) { + ecKeys := generateECTestKeys(t) + + for _, ecKey := range ecKeys { + if ecKey.KeyType() != "EC" { + t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType()) + } + } +} + +func TestECSignVerify(t *testing.T) { + ecKeys := generateECTestKeys(t) + + message := "Hello, World!" 
+ data := bytes.NewReader([]byte(message)) + + sigAlgs := []*signatureAlgorithm{es256, es384, es512} + + for i, ecKey := range ecKeys { + sigAlg := sigAlgs[i] + + t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID()) + + data.Seek(0, 0) // Reset the byte reader + + // Sign + sig, alg, err := ecKey.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + + // Verify + err = ecKey.Verify(data, alg, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMarshalUnmarshalECKeys(t *testing.T) { + ecKeys := generateECTestKeys(t) + data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) + sigAlgs := []*signatureAlgorithm{es256, es384, es512} + + for i, ecKey := range ecKeys { + sigAlg := sigAlgs[i] + privateJWKJSON, err := json.MarshalIndent(ecKey, "", " ") + if err != nil { + t.Fatal(err) + } + + publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", " ") + if err != nil { + t.Fatal(err) + } + + t.Logf("JWK Private Key: %s", string(privateJWKJSON)) + t.Logf("JWK Public Key: %s", string(publicJWKJSON)) + + privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) + if err != nil { + t.Fatal(err) + } + + pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) + if err != nil { + t.Fatal(err) + } + + // Ensure we can sign/verify a message with the unmarshalled keys. + data.Seek(0, 0) // Reset the byte reader + signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + err = pubKey2.Verify(data, alg, signature) + if err != nil { + t.Fatal(err) + } + } +} + +func TestFromCryptoECKeys(t *testing.T) { + ecKeys := generateECTestKeys(t) + + for _, ecKey := range ecKeys { + cryptoPrivateKey := ecKey.CryptoPrivateKey() + cryptoPublicKey := ecKey.CryptoPublicKey() + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + t.Fatal(err) + } + + if pubKey.KeyID() != ecKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + + privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) + if err != nil { + t.Fatal(err) + } + + if privKey.KeyID() != ecKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + } +} + +func TestExtendedFields(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + key.AddExtendedField("test", "foobar") + val := key.GetExtendedField("test") + + gotVal, ok := val.(string) + if !ok { + t.Fatalf("value is not a string") + } else if gotVal != val { + t.Fatalf("value %q is not equal to %q", gotVal, val) + } + +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/filter.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/filter.go new file mode 100644 index 0000000000..5b2b4fca6f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned. 
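+//
+// For example (hypothetical key set and address),
+//
+//	matched, err := FilterByHosts(trustedKeys, "registry.example.com:2376", true)
+//
+// returns every key whose "hosts" patterns match the address, plus any key
+// that carries no "hosts" field at all.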
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } + + if len(hosts) == 0 { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any hosts match pattern + for _, hostPattern := range hosts { + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + continue + } + } + } + + return filtered, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go new file mode 100644 index 0000000000..997e554c04 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go @@ -0,0 +1,81 @@ +package libtrust + +import ( + "testing" +) + +func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) { + if len(sliceA) != len(sliceB) { + t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB)) + } + + for i, itemA := range sliceA { + itemB := sliceB[i] + if itemA != itemB { + t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB) + } + } +} + +func TestFilter(t *testing.T) { + keys := make([]PublicKey, 0, 8) + + // Create 8 keys and add host entries. + for i := 0; i < cap(keys); i++ { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + // we use both []interface{} and []string here because jwt uses + // []interface{} format, while PEM uses []string + switch { + case i == 0: + // Don't add entries for this key, key 0. + break + case i%2 == 0: + // Should catch keys 2, 4, and 6. + key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) + case i == 7: + // Should catch only the last key, and make it match any hostname. + key.AddExtendedField("hosts", []string{"*"}) + default: + // should catch keys 1, 3, 5. + key.AddExtendedField("hosts", []string{"*.example.com"}) + } + + keys = append(keys, key) + } + + // Should match 2 keys, the empty one, and the one that matches all hosts. + matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true) + if err != nil { + t.Fatal(err) + } + expectedMatch := []PublicKey{keys[0], keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match 1 key, the one that matches any host. + matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = []PublicKey{keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match keys that end in "example.com", and the key that matches anything. + matchedKeys, err = FilterByHosts(keys, "foo.example.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match all of the keys except the empty key. 
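+	// ("foo.even.example.com" is matched by "*.even.example.com", by
+	// "*.example.com" (filepath.Match's '*' stops only at '/', never at
+	// '.'), and by the catch-all "*", so only keys[0] is left out.)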
+ matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = keys[1:] + compareKeySlices(t, expectedMatch, matchedKeys) +} diff --git a/tests/_vendor/src/github.com/docker/libtrust/hash.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/hash.go similarity index 100% rename from tests/_vendor/src/github.com/docker/libtrust/hash.go rename to tests/Godeps/_workspace/src/github.com/docker/libtrust/hash.go diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go new file mode 100644 index 0000000000..c63530410d --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go @@ -0,0 +1,566 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "time" + "unicode" +) + +var ( + // ErrInvalidSignContent is used when the content to be signed is invalid. + ErrInvalidSignContent = errors.New("invalid sign content") + + // ErrInvalidJSONContent is used when invalid json is encountered. + ErrInvalidJSONContent = errors.New("invalid json content") + + // ErrMissingSignatureKey is used when the specified signature key + // does not exist in the JSON content. + ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header *jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []*jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]*jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. 
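+// The signing input follows JWS: base64url(protected header) + "." +
+// base64url(payload), signed requesting a SHA-256 digest. A minimal sketch:
+//
+//	js, _ := NewJSONSignature(content)
+//	if err := js.Sign(key); err != nil {
+//		// handle error
+//	}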
+func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := &jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + } + sig := &jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + } + + js.signatures = append(js.signatures, sig) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. +func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := &jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + sig := &jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + } + + js.signatures = append(js.signatures, sig) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. 
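+//
+// Typical use against a CA pool, mirroring the package tests:
+//
+//	pool := x509.NewCertPool()
+//	pool.AddCert(caCert)
+//	chains, err := js.VerifyChains(pool)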
+func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header *jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. 
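+// The expected input is the JWS JSON serialization as produced by JWS(),
+// i.e. an object of the form {"payload": "...", "signatures": [...]}.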
+func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []*jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]*jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := &jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = &jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +func NewJSONSignature(content []byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
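+// "Format information" refers to the formatLength and formatTail values
+// that Sign records in each signature's protected header; without them the
+// original unsigned byte layout cannot be reconstructed for verification.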
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]*jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := &jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = &jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
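+//
+// Round-trip sketch (the signature key name is the caller's choice):
+//
+//	signed, _ := js.PrettySignature("signatures")
+//	parsed, _ := ParsePrettySignature(signed, "signatures")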
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("no signatures") + } + payload, err := joseBase64UrlDecode(js.payload) + if err != nil { + return nil, err + } + payload = payload[:js.formatLength] + + var marshalled []byte + var marshallErr error + if js.indent != "" { + marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) + } else { + marshalled, marshallErr = json.Marshal(js.signatures) + } + if marshallErr != nil { + return nil, marshallErr + } + + buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) + buf.Write(payload) + buf.WriteByte(',') + if js.indent != "" { + buf.WriteByte('\n') + buf.WriteString(js.indent) + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\": ") + buf.Write(marshalled) + buf.WriteByte('\n') + } else { + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\":") + buf.Write(marshalled) + } + buf.WriteByte('}') + + return buf.Bytes(), nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go new file mode 100644 index 0000000000..59616b9f4e --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go @@ -0,0 +1,297 @@ +package libtrust + +import ( + "bytes" + "crypto/x509" + "encoding/json" + "fmt" + "testing" + + "github.com/docker/libtrust/testutil" +) + +func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) { + testMap := map[string]interface{}{ + "name": "dmcgowan/mycontainer", + "config": map[string]interface{}{ + "ports": []int{9101, 9102}, + "run": "/bin/echo \"Hello\"", + }, + "layers": []string{ + "2893c080-27f5-11e4-8c21-0800200c9a66", + "c54bc25b-fbb2-497b-a899-a8bc1b5b9d55", + "4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4", + "0b6da891-7f7f-4abf-9c97-7887549e696c", + "1d960389-ae4f-4011-85fd-18d0f96a67ad", + }, + } + formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{` + formattedSection = fmt.Sprintf(formattedSection, sigKey) + if indent != "" { + buf := bytes.NewBuffer(nil) + json.Indent(buf, []byte(formattedSection), "", indent) + return testMap, buf.Bytes() + } + return testMap, []byte(formattedSection) + +} + +func TestSignJSON(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating EC key: %s", err) + } + + testMap, _ := createTestJSON("buildSignatures", " ") + indented, err := json.MarshalIndent(testMap, "", " ") + if err != nil { + t.Fatalf("Marshall error: %s", err) + } + + js, err := NewJSONSignature(indented) + if err != nil { + t.Fatalf("Error creating JSON signature: %s", err) + } + err = js.Sign(key) + if err != nil { + t.Fatalf("Error signing content: %s", err) + } + + keys, err := js.Verify() + if err != nil { + t.Fatalf("Error verifying signature: %s", err) + } + if len(keys) != 1 { + t.Fatalf("Error wrong number of keys returned") + } + if keys[0].KeyID() != key.KeyID() { + t.Fatalf("Unexpected public key returned") + } + +} + +func TestSignMap(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating EC key: %s", err) + } + + 
testMap, _ := createTestJSON("buildSignatures", " ") + js, err := NewJSONSignatureFromMap(testMap) + if err != nil { + t.Fatalf("Error creating JSON signature: %s", err) + } + err = js.Sign(key) + if err != nil { + t.Fatalf("Error signing JSON signature: %s", err) + } + + keys, err := js.Verify() + if err != nil { + t.Fatalf("Error verifying signature: %s", err) + } + if len(keys) != 1 { + t.Fatalf("Error wrong number of keys returned") + } + if keys[0].KeyID() != key.KeyID() { + t.Fatalf("Unexpected public key returned") + } +} + +func TestFormattedJson(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating EC key: %s", err) + } + + testMap, firstSection := createTestJSON("buildSignatures", " ") + indented, err := json.MarshalIndent(testMap, "", " ") + if err != nil { + t.Fatalf("Marshall error: %s", err) + } + + js, err := NewJSONSignature(indented) + if err != nil { + t.Fatalf("Error creating JSON signature: %s", err) + } + err = js.Sign(key) + if err != nil { + t.Fatalf("Error signing content: %s", err) + } + + b, err := js.PrettySignature("buildSignatures") + if err != nil { + t.Fatalf("Error signing map: %s", err) + } + + if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { + t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) + } + + parsed, err := ParsePrettySignature(b, "buildSignatures") + if err != nil { + t.Fatalf("Error parsing formatted signature: %s", err) + } + + keys, err := parsed.Verify() + if err != nil { + t.Fatalf("Error verifying signature: %s", err) + } + if len(keys) != 1 { + t.Fatalf("Error wrong number of keys returned") + } + if keys[0].KeyID() != key.KeyID() { + t.Fatalf("Unexpected public key returned") + } + + var unmarshalled map[string]interface{} + err = json.Unmarshal(b, &unmarshalled) + if err != nil { + t.Fatalf("Could not unmarshall after parse: %s", err) + } + +} + +func TestFormattedFlatJson(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating EC key: %s", err) + } + + testMap, firstSection := createTestJSON("buildSignatures", "") + unindented, err := json.Marshal(testMap) + if err != nil { + t.Fatalf("Marshall error: %s", err) + } + + js, err := NewJSONSignature(unindented) + if err != nil { + t.Fatalf("Error creating JSON signature: %s", err) + } + err = js.Sign(key) + if err != nil { + t.Fatalf("Error signing JSON signature: %s", err) + } + + b, err := js.PrettySignature("buildSignatures") + if err != nil { + t.Fatalf("Error signing map: %s", err) + } + + if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { + t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) + } + + parsed, err := ParsePrettySignature(b, "buildSignatures") + if err != nil { + t.Fatalf("Error parsing formatted signature: %s", err) + } + + keys, err := parsed.Verify() + if err != nil { + t.Fatalf("Error verifying signature: %s", err) + } + if len(keys) != 1 { + t.Fatalf("Error wrong number of keys returned") + } + if keys[0].KeyID() != key.KeyID() { + t.Fatalf("Unexpected public key returned") + } +} + +func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) { + parent := ca + parentKey := key + chain := make([]*x509.Certificate, 6) + for i := 5; i > 0; i-- { + intermediatekey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generate key: %s", err) + } + chain[i], err = 
testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+		if err != nil {
+			t.Fatalf("Error generating intermediate certificate: %s", err)
+		}
+		parent = chain[i]
+		parentKey = intermediatekey
+	}
+	trustKey, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+	}
+	chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+	if err != nil {
+		t.Fatalf("Error generating trust cert: %s", err)
+	}
+
+	return trustKey, chain
+}
+
+func TestChainVerify(t *testing.T) {
+	caKey, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+	}
+	ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
+	if err != nil {
+		t.Fatalf("Error generating ca: %s", err)
+	}
+	trustKey, chain := generateTrustChain(t, caKey, ca)
+
+	testMap, _ := createTestJSON("verifySignatures", " ")
+	js, err := NewJSONSignatureFromMap(testMap)
+	if err != nil {
+		t.Fatalf("Error creating JSONSignature from map: %s", err)
+	}
+
+	err = js.SignWithChain(trustKey, chain)
+	if err != nil {
+		t.Fatalf("Error signing with chain: %s", err)
+	}
+
+	pool := x509.NewCertPool()
+	pool.AddCert(ca)
+	chains, err := js.VerifyChains(pool)
+	if err != nil {
+		t.Fatalf("Error verifying content: %s", err)
+	}
+	if len(chains) != 1 {
+		t.Fatalf("Unexpected chains length: %d", len(chains))
+	}
+	if len(chains[0]) != 7 {
+		t.Fatalf("Unexpected chain length: %d", len(chains[0]))
+	}
+}
+
+func TestInvalidChain(t *testing.T) {
+	caKey, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+	}
+	ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
+	if err != nil {
+		t.Fatalf("Error generating ca: %s", err)
+	}
+	trustKey, chain := generateTrustChain(t, caKey, ca)
+
+	testMap, _ := createTestJSON("verifySignatures", " ")
+	js, err := NewJSONSignatureFromMap(testMap)
+	if err != nil {
+		t.Fatalf("Error creating JSONSignature from map: %s", err)
+	}
+
+	err = js.SignWithChain(trustKey, chain[:5])
+	if err != nil {
+		t.Fatalf("Error signing with chain: %s", err)
+	}
+
+	pool := x509.NewCertPool()
+	pool.AddCert(ca)
+	chains, err := js.VerifyChains(pool)
+	if err == nil {
+		t.Fatalf("Expected error verifying with bad chain")
+	}
+	if len(chains) != 0 {
+		t.Fatalf("Unexpected chains returned from invalid verify")
+	}
+}
diff --git a/tests/_vendor/src/github.com/docker/libtrust/key.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/key.go
similarity index 100%
rename from tests/_vendor/src/github.com/docker/libtrust/key.go
rename to tests/Godeps/_workspace/src/github.com/docker/libtrust/key.go
diff --git a/tests/_vendor/src/github.com/docker/libtrust/key_files.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go
similarity index 100%
rename from tests/_vendor/src/github.com/docker/libtrust/key_files.go
rename to tests/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go
new file mode 100644
index 0000000000..57e691f2ed
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go
@@ -0,0 +1,220 @@
+package libtrust
+
+import (
+	"errors"
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+func makeTempFile(t *testing.T, prefix string) (filename string) {
+	file,
err := ioutil.TempFile("", prefix) + if err != nil { + t.Fatal(err) + } + + filename = file.Name() + file.Close() + + return +} + +func TestKeyFiles(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + testKeyFiles(t, key) + + key, err = GenerateRSA2048PrivateKey() + if err != nil { + t.Fatal(err) + } + + testKeyFiles(t, key) +} + +func testKeyFiles(t *testing.T, key PrivateKey) { + var err error + + privateKeyFilename := makeTempFile(t, "private_key") + privateKeyFilenamePEM := privateKeyFilename + ".pem" + privateKeyFilenameJWK := privateKeyFilename + ".jwk" + + publicKeyFilename := makeTempFile(t, "public_key") + publicKeyFilenamePEM := publicKeyFilename + ".pem" + publicKeyFilenameJWK := publicKeyFilename + ".jwk" + + if err = SaveKey(privateKeyFilenamePEM, key); err != nil { + t.Fatal(err) + } + + if err = SaveKey(privateKeyFilenameJWK, key); err != nil { + t.Fatal(err) + } + + if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil { + t.Fatal(err) + } + + if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil { + t.Fatal(err) + } + + loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM) + if err != nil { + t.Fatal(err) + } + + loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK) + if err != nil { + t.Fatal(err) + } + + loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM) + if err != nil { + t.Fatal(err) + } + + loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK) + if err != nil { + t.Fatal(err) + } + + if key.KeyID() != loadedPEMKey.KeyID() { + t.Fatal(errors.New("key IDs do not match")) + } + + if key.KeyID() != loadedJWKKey.KeyID() { + t.Fatal(errors.New("key IDs do not match")) + } + + if key.KeyID() != loadedPEMPublicKey.KeyID() { + t.Fatal(errors.New("key IDs do not match")) + } + + if key.KeyID() != loadedJWKPublicKey.KeyID() { + t.Fatal(errors.New("key IDs do not match")) + } + + os.Remove(privateKeyFilename) + os.Remove(privateKeyFilenamePEM) + os.Remove(privateKeyFilenameJWK) + os.Remove(publicKeyFilename) + os.Remove(publicKeyFilenamePEM) + os.Remove(publicKeyFilenameJWK) +} + +func TestTrustedHostKeysFile(t *testing.T) { + trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys") + trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem" + trustedHostKeysFilenameJWK := trustedHostKeysFilename + ".json" + + testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM) + testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK) + + os.Remove(trustedHostKeysFilename) + os.Remove(trustedHostKeysFilenamePEM) + os.Remove(trustedHostKeysFilenameJWK) +} + +func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { + hostAddress1 := "docker.example.com:2376" + hostKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + hostKey1.AddExtendedField("hosts", []string{hostAddress1}) + err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename) + if err != nil { + t.Fatal(err) + } + + for addr, hostKey := range trustedHostKeysMapping { + t.Logf("Host Address: %d\n", addr) + t.Logf("Host Key: %s\n\n", hostKey) + } + + hostAddress2 := "192.168.59.103:2376" + hostKey2, err := GenerateRSA2048PrivateKey() + if err != nil { + t.Fatal(err) + } + + hostKey2.AddExtendedField("hosts", hostAddress2) + err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey()) + if err != nil { + t.Fatal(err) + } + + 
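+	// Reload the key set: AddKeySetFile appends to the existing file, so
+	// both hostKey1 and hostKey2 should now come back.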
trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename) + if err != nil { + t.Fatal(err) + } + + for addr, hostKey := range trustedHostKeysMapping { + t.Logf("Host Address: %d\n", addr) + t.Logf("Host Key: %s\n\n", hostKey) + } + +} + +func TestTrustedClientKeysFile(t *testing.T) { + trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys") + trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem" + trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json" + + testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM) + testTrustedClientKeysFile(t, trustedClientKeysFilenameJWK) + + os.Remove(trustedClientKeysFilename) + os.Remove(trustedClientKeysFilenamePEM) + os.Remove(trustedClientKeysFilenameJWK) +} + +func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) { + clientKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename) + if err != nil { + t.Fatal(err) + } + + for _, clientKey := range trustedClientKeys { + t.Logf("Client Key: %s\n", clientKey) + } + + clientKey2, err := GenerateRSA2048PrivateKey() + if err != nil { + t.Fatal(err) + } + + err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename) + if err != nil { + t.Fatal(err) + } + + for _, clientKey := range trustedClientKeys { + t.Logf("Client Key: %s\n", clientKey) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go new file mode 100644 index 0000000000..f6c59cc42b --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go @@ -0,0 +1,80 @@ +package libtrust + +import ( + "testing" +) + +type generateFunc func() (PrivateKey, error) + +func runGenerateBench(b *testing.B, f generateFunc, name string) { + for i := 0; i < b.N; i++ { + _, err := f() + if err != nil { + b.Fatalf("Error generating %s: %s", name, err) + } + } +} + +func runFingerprintBench(b *testing.B, f generateFunc, name string) { + b.StopTimer() + // Don't count this relatively slow generation call. 
+ key, err := f() + if err != nil { + b.Fatalf("Error generating %s: %s", name, err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if key.KeyID() == "" { + b.Fatalf("Error generating key ID for %s", name) + } + } +} + +func BenchmarkECP256Generate(b *testing.B) { + runGenerateBench(b, GenerateECP256PrivateKey, "P256") +} + +func BenchmarkECP384Generate(b *testing.B) { + runGenerateBench(b, GenerateECP384PrivateKey, "P384") +} + +func BenchmarkECP521Generate(b *testing.B) { + runGenerateBench(b, GenerateECP521PrivateKey, "P521") +} + +func BenchmarkRSA2048Generate(b *testing.B) { + runGenerateBench(b, GenerateRSA2048PrivateKey, "RSA2048") +} + +func BenchmarkRSA3072Generate(b *testing.B) { + runGenerateBench(b, GenerateRSA3072PrivateKey, "RSA3072") +} + +func BenchmarkRSA4096Generate(b *testing.B) { + runGenerateBench(b, GenerateRSA4096PrivateKey, "RSA4096") +} + +func BenchmarkECP256Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateECP256PrivateKey, "P256") +} + +func BenchmarkECP384Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateECP384PrivateKey, "P384") +} + +func BenchmarkECP521Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateECP521PrivateKey, "P521") +} + +func BenchmarkRSA2048Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateRSA2048PrivateKey, "RSA2048") +} + +func BenchmarkRSA3072Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateRSA3072PrivateKey, "RSA3072") +} + +func BenchmarkRSA4096Fingerprint(b *testing.B) { + runFingerprintBench(b, GenerateRSA4096PrivateKey, "RSA4096") +} diff --git a/tests/_vendor/src/github.com/docker/libtrust/rsa_key.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go similarity index 96% rename from tests/_vendor/src/github.com/docker/libtrust/rsa_key.go rename to tests/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go index 45463039d2..dac4cacf20 100644 --- a/tests/_vendor/src/github.com/docker/libtrust/rsa_key.go +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go @@ -34,16 +34,7 @@ func (k *rsaPublicKey) KeyType() string { // KeyID returns a distinct identifier which is unique to this Public Key. func (k *rsaPublicKey) KeyID() string { - // Generate and return a 'libtrust' fingerprint of the RSA public key. - // For an RSA key this should be: - // SHA256("RSA"+bytes(N)+bytes(E)) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - hasher := crypto.SHA256.New() - hasher.Write([]byte(k.KeyType())) - hasher.Write(k.N.Bytes()) - hasher.Write(serializeRSAPublicExponentParam(k.E)) - return keyIDEncode(hasher.Sum(nil)[:30]) + return keyIDFromCryptoKey(k) } func (k *rsaPublicKey) String() string { @@ -108,7 +99,7 @@ func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { if err != nil { return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) } - k.extended["keyID"] = k.KeyID() // For display purposes. + k.extended["kid"] = k.KeyID() // For display purposes. 
return createPemBlock("PUBLIC KEY", derBytes, k.extended) } diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go new file mode 100644 index 0000000000..5ec7707aa6 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go @@ -0,0 +1,157 @@ +package libtrust + +import ( + "bytes" + "encoding/json" + "log" + "testing" +) + +var rsaKeys []PrivateKey + +func init() { + var err error + rsaKeys, err = generateRSATestKeys() + if err != nil { + log.Fatal(err) + } +} + +func generateRSATestKeys() (keys []PrivateKey, err error) { + log.Println("Generating RSA 2048-bit Test Key") + rsa2048Key, err := GenerateRSA2048PrivateKey() + if err != nil { + return + } + + log.Println("Generating RSA 3072-bit Test Key") + rsa3072Key, err := GenerateRSA3072PrivateKey() + if err != nil { + return + } + + log.Println("Generating RSA 4096-bit Test Key") + rsa4096Key, err := GenerateRSA4096PrivateKey() + if err != nil { + return + } + + log.Println("Done generating RSA Test Keys!") + keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key} + + return +} + +func TestRSAKeys(t *testing.T) { + for _, rsaKey := range rsaKeys { + if rsaKey.KeyType() != "RSA" { + t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType()) + } + } +} + +func TestRSASignVerify(t *testing.T) { + message := "Hello, World!" + data := bytes.NewReader([]byte(message)) + + sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} + + for i, rsaKey := range rsaKeys { + sigAlg := sigAlgs[i] + + t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID()) + + data.Seek(0, 0) // Reset the byte reader + + // Sign + sig, alg, err := rsaKey.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + + // Verify + err = rsaKey.Verify(data, alg, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMarshalUnmarshalRSAKeys(t *testing.T) { + data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) + sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} + + for i, rsaKey := range rsaKeys { + sigAlg := sigAlgs[i] + privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ") + if err != nil { + t.Fatal(err) + } + + publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ") + if err != nil { + t.Fatal(err) + } + + t.Logf("JWK Private Key: %s", string(privateJWKJSON)) + t.Logf("JWK Public Key: %s", string(publicJWKJSON)) + + privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) + if err != nil { + t.Fatal(err) + } + + pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) + if err != nil { + t.Fatal(err) + } + + // Ensure we can sign/verify a message with the unmarshalled keys. + data.Seek(0, 0) // Reset the byte reader + signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + err = pubKey2.Verify(data, alg, signature) + if err != nil { + t.Fatal(err) + } + + // It's a good idea to validate the Private Key to make sure our + // (un)marshal process didn't corrupt the extra parameters. 
+		k := privKey2.(*rsaPrivateKey)
+		err = k.PrivateKey.Validate()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestFromCryptoRSAKeys(t *testing.T) {
+	for _, rsaKey := range rsaKeys {
+		cryptoPrivateKey := rsaKey.CryptoPrivateKey()
+		cryptoPublicKey := rsaKey.CryptoPublicKey()
+
+		pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if pubKey.KeyID() != rsaKey.KeyID() {
+			t.Fatal("public key key ID mismatch")
+		}
+
+		privKey, err := FromCryptoPrivateKey(cryptoPrivateKey)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if privKey.KeyID() != rsaKey.KeyID() {
+			t.Fatal("private key key ID mismatch")
+		}
+	}
+}
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go
new file mode 100644
index 0000000000..89debf6b64
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go
@@ -0,0 +1,94 @@
+package testutil
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"math/big"
+	"time"
+)
+
+// GenerateTrustCA generates a new certificate authority for testing.
+func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) {
+	cert := &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: "CA Root",
+		},
+		NotBefore:             time.Now().Add(-time.Second),
+		NotAfter:              time.Now().Add(time.Hour),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
+		BasicConstraintsValid: true,
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, err
+	}
+
+	return cert, nil
+}
+
+// GenerateIntermediate generates an intermediate certificate for testing using
+// the parent certificate (likely a CA) and the provided keys.
+func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
+	cert := &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: "Intermediate",
+		},
+		NotBefore:             time.Now().Add(-time.Second),
+		NotAfter:              time.Now().Add(time.Hour),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
+		BasicConstraintsValid: true,
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, err
+	}
+
+	return cert, nil
+}
+
+// GenerateTrustCert generates a new trust certificate for testing. Unlike the
+// intermediate certificates, this certificate should be used for signature
+// only, not creating certificates.
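+//
+// A sketch of typical use, following the tests in this change (parentKey and
+// parent are assumed to come from GenerateTrustCA or GenerateIntermediate):
+//
+//	leafKey, _ := libtrust.GenerateECP256PrivateKey()
+//	cert, err := testutil.GenerateTrustCert(leafKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)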
+func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
+	cert := &x509.Certificate{
+		SerialNumber: big.NewInt(0),
+		Subject: pkix.Name{
+			CommonName: "Trust Cert",
+		},
+		NotBefore:             time.Now().Add(-time.Second),
+		NotAfter:              time.Now().Add(time.Hour),
+		IsCA:                  true,
+		KeyUsage:              x509.KeyUsageDigitalSignature,
+		BasicConstraintsValid: true,
+	}
+
+	certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
+	if err != nil {
+		return nil, err
+	}
+
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return nil, err
+	}
+
+	return cert, nil
+}
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
new file mode 100644
index 0000000000..24124db216
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
@@ -0,0 +1,50 @@
+## Libtrust TLS Config Demo
+
+This program generates key pairs and trust files for a TLS client and server.
+
+To generate the keys, run:
+
+```
+$ go run genkeys.go
+```
+
+The generated files are:
+
+```
+$ ls -l client_data/ server_data/
+client_data/:
+total 24
+-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
+-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
+-rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json
+
+server_data/:
+total 24
+-rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json
+-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
+-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
+```
+
+The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `<host>:<port>`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects, each containing a comment field which can be used to describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client.
+
+To start the server, run:
+
+```
+$ go run server.go
+```
+
+This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message.
+
+To make a request using the client, run:
+
+```
+$ go run client.go
+```
+
+This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server.
+
+The file `gencert.go` can be used to generate PEM-encoded versions of the client key and certificate.
If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running). + +``` +curl --cert cert.pem --key key.pem -k https://localhost:8888 +``` diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go new file mode 100644 index 0000000000..0a699a0ee2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go @@ -0,0 +1,89 @@ +package main + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + + "github.com/docker/libtrust" +) + +var ( + serverAddress = "localhost:8888" + privateKeyFilename = "client_data/private_key.pem" + trustedHostsFilename = "client_data/trusted_hosts.pem" +) + +func main() { + // Load Client Key. + clientKey, err := libtrust.LoadKeyFile(privateKeyFilename) + if err != nil { + log.Fatal(err) + } + + // Generate Client Certificate. + selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey) + if err != nil { + log.Fatal(err) + } + + // Load trusted host keys. + hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) + if err != nil { + log.Fatal(err) + } + + // Ensure the host we want to connect to is trusted! + host, _, err := net.SplitHostPort(serverAddress) + if err != nil { + log.Fatal(err) + } + serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false) + if err != nil { + log.Fatalf("%q is not a known and trusted host", host) + } + + // Generate a CA pool with the trusted host's key. + caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys) + if err != nil { + log.Fatal(err) + } + + // Create HTTP Client. + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + Certificates: []tls.Certificate{ + tls.Certificate{ + Certificate: [][]byte{selfSignedClientCert.Raw}, + PrivateKey: clientKey.CryptoPrivateKey(), + Leaf: selfSignedClientCert, + }, + }, + RootCAs: caPool, + }, + }, + } + + var makeRequest = func(url string) { + resp, err := client.Get(url) + if err != nil { + log.Fatal(err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Fatal(err) + } + + log.Println(resp.Status) + log.Println(string(body)) + } + + // Make the request to the trusted server! 
+ makeRequest(fmt.Sprintf("https://%s", serverAddress)) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go new file mode 100644 index 0000000000..c65f3b6b44 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go @@ -0,0 +1,62 @@ +package main + +import ( + "encoding/pem" + "fmt" + "log" + "net" + + "github.com/docker/libtrust" +) + +var ( + serverAddress = "localhost:8888" + clientPrivateKeyFilename = "client_data/private_key.pem" + trustedHostsFilename = "client_data/trusted_hosts.pem" +) + +func main() { + key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename) + if err != nil { + log.Fatal(err) + } + + keyPEMBlock, err := key.PEMBlock() + if err != nil { + log.Fatal(err) + } + + encodedPrivKey := pem.EncodeToMemory(keyPEMBlock) + fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey)) + + cert, err := libtrust.GenerateSelfSignedClientCert(key) + if err != nil { + log.Fatal(err) + } + + encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) + fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert)) + + trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) + if err != nil { + log.Fatal(err) + } + + hostname, _, err := net.SplitHostPort(serverAddress) + if err != nil { + log.Fatal(err) + } + + trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false) + if err != nil { + log.Fatal(err) + } + + caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0]) + if err != nil { + log.Fatal(err) + } + + encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw}) + fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert)) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go new file mode 100644 index 0000000000..9dc8842ad9 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go @@ -0,0 +1,61 @@ +package main + +import ( + "log" + + "github.com/docker/libtrust" +) + +func main() { + // Generate client key. + clientKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Add a comment for the client key. + clientKey.AddExtendedField("comment", "TLS Demo Client") + + // Save the client key, public and private versions. + err = libtrust.SaveKey("client_data/private_key.pem", clientKey) + if err != nil { + log.Fatal(err) + } + + err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate server key. + serverKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Set the list of addresses to use for the server. + serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"}) + + // Save the server key, public and private versions. + err = libtrust.SaveKey("server_data/private_key.pem", serverKey) + if err != nil { + log.Fatal(err) + } + + err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate Authorized Keys file for server. + err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate Known Host Keys file for client. 
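+	// (As in the key_files tests above, the .pem extension appears to select
+	// PEM encoding; a .json or .jwk name would presumably store the key set
+	// as JSON Web Keys instead.)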
+	err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey())
+	if err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
new file mode 100644
index 0000000000..d3cb2ea91f
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
@@ -0,0 +1,80 @@
+package main
+
+import (
+	"crypto/tls"
+	"fmt"
+	"html"
+	"log"
+	"net"
+	"net/http"
+
+	"github.com/docker/libtrust"
+)
+
+var (
+	serverAddress             = "localhost:8888"
+	privateKeyFilename        = "server_data/private_key.pem"
+	authorizedClientsFilename = "server_data/trusted_clients.pem"
+)
+
+func requestHandler(w http.ResponseWriter, r *http.Request) {
+	clientCert := r.TLS.PeerCertificates[0]
+	keyID := clientCert.Subject.CommonName
+	log.Printf("Request from keyID: %s\n", keyID)
+	fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID))
+}
+
+func main() {
+	// Load server key.
+	serverKey, err := libtrust.LoadKeyFile(privateKeyFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Generate server certificate.
+	selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert(
+		serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Load authorized client keys.
+	authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create CA pool using trusted client keys.
+	caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create TLS config, requiring client certificates.
+	tlsConfig := &tls.Config{
+		Certificates: []tls.Certificate{
+			tls.Certificate{
+				Certificate: [][]byte{selfSignedServerCert.Raw},
+				PrivateKey:  serverKey.CryptoPrivateKey(),
+				Leaf:        selfSignedServerCert,
+			},
+		},
+		ClientAuth: tls.RequireAndVerifyClientCert,
+		ClientCAs:  caPool,
+	}
+
+	// Create HTTP server with simple request handler.
+	server := &http.Server{
+		Addr:    serverAddress,
+		Handler: http.HandlerFunc(requestHandler),
+	}
+
+	// Listen and serve HTTPS using the libtrust TLS config.
+	listener, err := net.Listen("tcp", server.Addr)
+	if err != nil {
+		log.Fatal(err)
+	}
+	tlsListener := tls.NewListener(listener, tlsConfig)
+	server.Serve(tlsListener)
+}
diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
new file mode 100644
index 0000000000..72b0fc3664
--- /dev/null
+++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
@@ -0,0 +1,50 @@
+package trustgraph
+
+import "github.com/docker/libtrust"
+
+// TrustGraph represents a graph of authorization mapping
+// public keys to nodes and grants between nodes.
+type TrustGraph interface {
+	// Verifies that the given public key is allowed to perform
+	// the given action on the given node according to the trust
+	// graph.
+	Verify(libtrust.PublicKey, string, uint16) (bool, error)
+
+	// GetGrants returns an array of all grant chains which are used to
+	// allow the requested permission.
+	GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error)
+}
+
+// Grant represents a transfer of permission from one part of the
+// trust graph to another.
This is the only way to delegate +// permission between two different sub trees in the graph. +type Grant struct { + // Subject is the namespace being granted + Subject string + + // Permissions is a bit map of permissions + Permission uint16 + + // Grantee represents the node being granted + // a permission scope. The grantee can be + // either a namespace item or a key id where namespace + // items will always start with a '/'. + Grantee string + + // statement represents the statement used to create + // this object. + statement *Statement +} + +// Permissions +// Read node 0x01 (can read node, no sub nodes) +// Write node 0x02 (can write to node object, cannot create subnodes) +// Read subtree 0x04 (delegates read to each sub node) +// Write subtree 0x08 (delegates write to each sub node, included create on the subject) +// +// Permission shortcuts +// ReadItem = 0x01 +// WriteItem = 0x03 +// ReadAccess = 0x07 +// WriteAccess = 0x0F +// Delegate = 0x0F diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go new file mode 100644 index 0000000000..247bfa7aa6 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go @@ -0,0 +1,133 @@ +package trustgraph + +import ( + "strings" + + "github.com/docker/libtrust" +) + +type grantNode struct { + grants []*Grant + children map[string]*grantNode +} + +type memoryGraph struct { + roots map[string]*grantNode +} + +func newGrantNode() *grantNode { + return &grantNode{ + grants: []*Grant{}, + children: map[string]*grantNode{}, + } +} + +// NewMemoryGraph returns a new in memory trust graph created from +// a static list of grants. This graph is immutable after creation +// and any alterations should create a new instance. 
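+//
+// A minimal sketch of building and querying a graph (pk is an assumed
+// libtrust.PrivateKey, e.g. from libtrust.GenerateECP256PrivateKey):
+//
+//	grants := []*Grant{{Subject: "/user-1", Permission: 0x0f, Grantee: pk.KeyID()}}
+//	g := NewMemoryGraph(grants)
+//	ok, _ := g.Verify(pk.PublicKey(), "/user-1/project", 0x01) // read permission on a sub node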
+func NewMemoryGraph(grants []*Grant) TrustGraph { + roots := map[string]*grantNode{} + for _, grant := range grants { + parts := strings.Split(grant.Grantee, "/") + nodes := roots + var node *grantNode + var nodeOk bool + for _, part := range parts { + node, nodeOk = nodes[part] + if !nodeOk { + node = newGrantNode() + nodes[part] = node + } + if part != "" { + node.grants = append(node.grants, grant) + } + nodes = node.children + } + } + return &memoryGraph{roots} +} + +func (g *memoryGraph) getGrants(name string) []*Grant { + nameParts := strings.Split(name, "/") + nodes := g.roots + var node *grantNode + var nodeOk bool + for _, part := range nameParts { + node, nodeOk = nodes[part] + if !nodeOk { + return nil + } + nodes = node.children + } + return node.grants +} + +func isSubName(name, sub string) bool { + if strings.HasPrefix(name, sub) { + if len(name) == len(sub) || name[len(sub)] == '/' { + return true + } + } + return false +} + +type walkFunc func(*Grant, []*Grant) bool + +func foundWalkFunc(*Grant, []*Grant) bool { + return true +} + +func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool { + if visited == nil { + visited = map[*Grant]bool{} + } + grants := g.getGrants(start) + subGrants := make([]*Grant, 0, len(grants)) + for _, grant := range grants { + if visited[grant] { + continue + } + visited[grant] = true + if grant.Permission&permission == permission { + if isSubName(target, grant.Subject) { + if f(grant, chain) { + return true + } + } else { + subGrants = append(subGrants, grant) + } + } + } + for _, grant := range subGrants { + var chainCopy []*Grant + if collect { + chainCopy = make([]*Grant, len(chain)+1) + copy(chainCopy, chain) + chainCopy[len(chainCopy)-1] = grant + } else { + chainCopy = nil + } + + if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) { + return true + } + } + return false +} + +func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) { + return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil +} + +func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) { + grants := [][]*Grant{} + collect := func(grant *Grant, chain []*Grant) bool { + grantChain := make([]*Grant, len(chain)+1) + copy(grantChain, chain) + grantChain[len(grantChain)-1] = grant + grants = append(grants, grantChain) + return false + } + g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true) + return grants, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go new file mode 100644 index 0000000000..49fd0f3b54 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go @@ -0,0 +1,174 @@ +package trustgraph + +import ( + "fmt" + "testing" + + "github.com/docker/libtrust" +) + +func createTestKeysAndGrants(count int) ([]*Grant, []libtrust.PrivateKey) { + grants := make([]*Grant, count) + keys := make([]libtrust.PrivateKey, count) + for i := 0; i < count; i++ { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + grant := &Grant{ + Subject: fmt.Sprintf("/user-%d", i+1), + Permission: 0x0f, + Grantee: pk.KeyID(), + } + keys[i] = pk + grants[i] = grant + } + return grants, keys +} + +func testVerified(t 
*testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { + if ok, err := g.Verify(k, target, permission); err != nil { + t.Fatalf("Unexpected error during verification: %s", err) + } else if !ok { + t.Errorf("key failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) + } +} + +func testNotVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { + if ok, err := g.Verify(k, target, permission); err != nil { + t.Fatalf("Unexpected error during verification: %s", err) + } else if ok { + t.Errorf("key should have failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) + } +} + +func TestVerify(t *testing.T) { + grants, keys := createTestKeysAndGrants(4) + extraGrants := make([]*Grant, 3) + extraGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + extraGrants[1] = &Grant{ + Subject: "/user-3/sub-project", + Permission: 0x0f, + Grantee: "/user-4", + } + extraGrants[2] = &Grant{ + Subject: "/user-4", + Permission: 0x07, + Grantee: "/user-1", + } + grants = append(grants, extraGrants...) + + g := NewMemoryGraph(grants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1/some-project/sub-value", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x07) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2/", 0x0f) + testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3/sub-value", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-value", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project/app", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3/sub-value", 0x0f) + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1/", 0x0f) + testNotVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-2", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-4", 0x0f) + testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) +} + +func TestCircularWalk(t *testing.T) { + grants, keys := createTestKeysAndGrants(3) + user1Grant := &Grant{ + Subject: "/user-2", + Permission: 0x0f, + Grantee: "/user-1", + } + user2Grant := &Grant{ + Subject: "/user-1", + Permission: 0x0f, + Grantee: "/user-2", + } + grants = append(grants, user1Grant, user2Grant) + + g := NewMemoryGraph(grants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1", 0x0f) + testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) +} + +func assertGrantSame(t *testing.T, actual, 
expected *Grant) { + if actual != expected { + t.Fatalf("Unexpected grant retrieved\n\tExpected: %v\n\tActual: %v", expected, actual) + } +} + +func TestGetGrants(t *testing.T) { + grants, keys := createTestKeysAndGrants(5) + extraGrants := make([]*Grant, 4) + extraGrants[0] = &Grant{ + Subject: "/user-3/friend-project", + Permission: 0x0f, + Grantee: "/user-2/friends", + } + extraGrants[1] = &Grant{ + Subject: "/user-3/sub-project", + Permission: 0x0f, + Grantee: "/user-4", + } + extraGrants[2] = &Grant{ + Subject: "/user-2/friends", + Permission: 0x0f, + Grantee: "/user-5/fun-project", + } + extraGrants[3] = &Grant{ + Subject: "/user-5/fun-project", + Permission: 0x0f, + Grantee: "/user-1", + } + grants = append(grants, extraGrants...) + + g := NewMemoryGraph(grants) + + grantChains, err := g.GetGrants(keys[3], "/user-3/sub-project/specific-app", 0x0f) + if err != nil { + t.Fatalf("Error getting grants: %s", err) + } + if len(grantChains) != 1 { + t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) + } + if len(grantChains[0]) != 2 { + t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) + } + assertGrantSame(t, grantChains[0][0], grants[3]) + assertGrantSame(t, grantChains[0][1], extraGrants[1]) + + grantChains, err = g.GetGrants(keys[0], "/user-3/friend-project/fun-app", 0x0f) + if err != nil { + t.Fatalf("Error getting grants: %s", err) + } + if len(grantChains) != 1 { + t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) + } + if len(grantChains[0]) != 4 { + t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) + } + assertGrantSame(t, grantChains[0][0], grants[0]) + assertGrantSame(t, grantChains[0][1], extraGrants[3]) + assertGrantSame(t, grantChains[0][2], extraGrants[2]) + assertGrantSame(t, grantChains[0][3], extraGrants[0]) +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go new file mode 100644 index 0000000000..7a74b553cd --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go @@ -0,0 +1,227 @@ +package trustgraph + +import ( + "crypto/x509" + "encoding/json" + "io" + "io/ioutil" + "sort" + "strings" + "time" + + "github.com/docker/libtrust" +) + +type jsonGrant struct { + Subject string `json:"subject"` + Permission uint16 `json:"permission"` + Grantee string `json:"grantee"` +} + +type jsonRevocation struct { + Subject string `json:"subject"` + Revocation uint16 `json:"revocation"` + Grantee string `json:"grantee"` +} + +type jsonStatement struct { + Revocations []*jsonRevocation `json:"revocations"` + Grants []*jsonGrant `json:"grants"` + Expiration time.Time `json:"expiration"` + IssuedAt time.Time `json:"issuedAt"` +} + +func (g *jsonGrant) Grant(statement *Statement) *Grant { + return &Grant{ + Subject: g.Subject, + Permission: g.Permission, + Grantee: g.Grantee, + statement: statement, + } +} + +// Statement represents a set of grants made from a verifiable +// authority. A statement has an expiration associated with it +// set by the authority. 
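+//
+// A sketch of the round trip exercised by the tests in this change (key,
+// chain and caPool are assumed to come from a generated trust chain):
+//
+//	st, _ := CreateStatement(grantsJSON, revocationsJSON, time.Hour, key, chain)
+//	b, _ := st.Bytes()
+//	verified, err := LoadStatement(bytes.NewReader(b), caPool)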
+type Statement struct {
+	jsonStatement
+
+	signature *libtrust.JSONSignature
+}
+
+// IsExpired returns whether the statement has expired.
+func (s *Statement) IsExpired() bool {
+	return s.Expiration.Before(time.Now().Add(-10 * time.Second))
+}
+
+// Bytes returns an indented JSON representation of the statement
+// in a byte array. This value can be written to a file or stream
+// without alteration.
+func (s *Statement) Bytes() ([]byte, error) {
+	return s.signature.PrettySignature("signatures")
+}
+
+// LoadStatement loads and verifies a statement from an input stream.
+func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+	js, err := libtrust.ParsePrettySignature(b, "signatures")
+	if err != nil {
+		return nil, err
+	}
+	payload, err := js.Payload()
+	if err != nil {
+		return nil, err
+	}
+	var statement Statement
+	err = json.Unmarshal(payload, &statement.jsonStatement)
+	if err != nil {
+		return nil, err
+	}
+
+	if authority == nil {
+		_, err = js.Verify()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		_, err = js.VerifyChains(authority)
+		if err != nil {
+			return nil, err
+		}
+	}
+	statement.signature = js
+
+	return &statement, nil
+}
+
+// CreateStatement creates and signs a statement from a stream of grants
+// and revocations in a JSON array.
+func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
+	var statement Statement
+	err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants)
+	if err != nil {
+		return nil, err
+	}
+	err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations)
+	if err != nil {
+		return nil, err
+	}
+	statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration)
+	statement.jsonStatement.IssuedAt = time.Now().UTC()
+
+	b, err := json.MarshalIndent(&statement.jsonStatement, "", " ")
+	if err != nil {
+		return nil, err
+	}
+
+	statement.signature, err = libtrust.NewJSONSignature(b)
+	if err != nil {
+		return nil, err
+	}
+	err = statement.signature.SignWithChain(key, chain)
+	if err != nil {
+		return nil, err
+	}
+
+	return &statement, nil
+}
+
+type statementList []*Statement
+
+func (s statementList) Len() int {
+	return len(s)
+}
+
+func (s statementList) Less(i, j int) bool {
+	return s[i].IssuedAt.Before(s[j].IssuedAt)
+}
+
+func (s statementList) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// CollapseStatements returns a single list of the valid statements as well as the
+// time when the next grant will expire.
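+//
+// For example (a sketch; statements assumed already loaded and verified):
+//
+//	grants, expiry, err := CollapseStatements(statements, false) // false: drop expired statements
+//	g := NewMemoryGraph(grants)                                  // expiry is when the earliest remaining grant lapses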
+func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) { + sorted := make(statementList, 0, len(statements)) + for _, statement := range statements { + if useExpired || !statement.IsExpired() { + sorted = append(sorted, statement) + } + } + sort.Sort(sorted) + + var minExpired time.Time + var grantCount int + roots := map[string]*grantNode{} + for i, statement := range sorted { + if statement.Expiration.Before(minExpired) || i == 0 { + minExpired = statement.Expiration + } + for _, grant := range statement.Grants { + parts := strings.Split(grant.Grantee, "/") + nodes := roots + g := grant.Grant(statement) + grantCount = grantCount + 1 + + for _, part := range parts { + node, nodeOk := nodes[part] + if !nodeOk { + node = newGrantNode() + nodes[part] = node + } + node.grants = append(node.grants, g) + nodes = node.children + } + } + + for _, revocation := range statement.Revocations { + parts := strings.Split(revocation.Grantee, "/") + nodes := roots + + var node *grantNode + var nodeOk bool + for _, part := range parts { + node, nodeOk = nodes[part] + if !nodeOk { + break + } + nodes = node.children + } + if node != nil { + for _, grant := range node.grants { + if isSubName(grant.Subject, revocation.Subject) { + grant.Permission = grant.Permission &^ revocation.Revocation + } + } + } + } + } + + retGrants := make([]*Grant, 0, grantCount) + for _, rootNodes := range roots { + retGrants = append(retGrants, rootNodes.grants...) + } + + return retGrants, minExpired, nil +} + +// FilterStatements filters the statements to statements including the given grants. +func FilterStatements(grants []*Grant) ([]*Statement, error) { + statements := map[*Statement]bool{} + for _, grant := range grants { + if grant.statement != nil { + statements[grant.statement] = true + } + } + retStatements := make([]*Statement, len(statements)) + var i int + for statement := range statements { + retStatements[i] = statement + i++ + } + return retStatements, nil +} diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go new file mode 100644 index 0000000000..e509468659 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go @@ -0,0 +1,417 @@ +package trustgraph + +import ( + "bytes" + "crypto/x509" + "encoding/json" + "testing" + "time" + + "github.com/docker/libtrust" + "github.com/docker/libtrust/testutil" +) + +const testStatementExpiration = time.Hour * 5 + +func generateStatement(grants []*Grant, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { + var statement Statement + + statement.Grants = make([]*jsonGrant, len(grants)) + for i, grant := range grants { + statement.Grants[i] = &jsonGrant{ + Subject: grant.Subject, + Permission: grant.Permission, + Grantee: grant.Grantee, + } + } + statement.IssuedAt = time.Now() + statement.Expiration = time.Now().Add(testStatementExpiration) + statement.Revocations = make([]*jsonRevocation, 0) + + marshalled, err := json.MarshalIndent(statement.jsonStatement, "", " ") + if err != nil { + return nil, err + } + + sig, err := libtrust.NewJSONSignature(marshalled) + if err != nil { + return nil, err + } + err = sig.SignWithChain(key, chain) + if err != nil { + return nil, err + } + statement.signature = sig + + return &statement, nil +} + +func generateTrustChain(t *testing.T, chainLen int) (libtrust.PrivateKey, *x509.CertPool, 
[]*x509.Certificate) {
+	caKey, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+	}
+	ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
+	if err != nil {
+		t.Fatalf("Error generating ca: %s", err)
+	}
+
+	parent := ca
+	parentKey := caKey
+	chain := make([]*x509.Certificate, chainLen)
+	for i := chainLen - 1; i > 0; i-- {
+		intermediatekey, err := libtrust.GenerateECP256PrivateKey()
+		if err != nil {
+			t.Fatalf("Error generating key: %s", err)
+		}
+		chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+		if err != nil {
+			t.Fatalf("Error generating intermediate certificate: %s", err)
+		}
+		parent = chain[i]
+		parentKey = intermediatekey
+	}
+	trustKey, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+	}
+	chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+	if err != nil {
+		t.Fatalf("Error generating trust cert: %s", err)
+	}
+
+	caPool := x509.NewCertPool()
+	caPool.AddCert(ca)
+
+	return trustKey, caPool, chain
+}
+
+func TestLoadStatement(t *testing.T) {
+	grantCount := 4
+	grants, _ := createTestKeysAndGrants(grantCount)
+
+	trustKey, caPool, chain := generateTrustChain(t, 6)
+
+	statement, err := generateStatement(grants, trustKey, chain)
+	if err != nil {
+		t.Fatalf("Error generating statement: %s", err)
+	}
+
+	statementBytes, err := statement.Bytes()
+	if err != nil {
+		t.Fatalf("Error getting statement bytes: %s", err)
+	}
+
+	s2, err := LoadStatement(bytes.NewReader(statementBytes), caPool)
+	if err != nil {
+		t.Fatalf("Error loading statement: %s", err)
+	}
+	if len(s2.Grants) != grantCount {
+		t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants))
+	}
+
+	pool := x509.NewCertPool()
+	_, err = LoadStatement(bytes.NewReader(statementBytes), pool)
+	if err == nil {
+		t.Fatalf("No error thrown verifying without an authority")
+	} else if _, ok := err.(x509.UnknownAuthorityError); !ok {
+		t.Fatalf("Unexpected error verifying without authority: %s", err)
+	}
+
+	s2, err = LoadStatement(bytes.NewReader(statementBytes), nil)
+	if err != nil {
+		t.Fatalf("Error loading statement: %s", err)
+	}
+	if len(s2.Grants) != grantCount {
+		t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants))
+	}
+
+	badData := make([]byte, len(statementBytes))
+	copy(badData, statementBytes)
+	badData[0] = '['
+	_, err = LoadStatement(bytes.NewReader(badData), nil)
+	if err == nil {
+		t.Fatalf("No error thrown parsing bad json")
+	}
+
+	alteredData := make([]byte, len(statementBytes))
+	copy(alteredData, statementBytes)
+	alteredData[30] = '0'
+	_, err = LoadStatement(bytes.NewReader(alteredData), nil)
+	if err == nil {
+		t.Fatalf("No error thrown from bad data")
+	}
+}
+
+func TestCollapseGrants(t *testing.T) {
+	grantCount := 8
+	grants, keys := createTestKeysAndGrants(grantCount)
+	linkGrants := make([]*Grant, 4)
+	linkGrants[0] = &Grant{
+		Subject:    "/user-3",
+		Permission: 0x0f,
+		Grantee:    "/user-2",
+	}
+	linkGrants[1] = &Grant{
+		Subject:    "/user-3/sub-project",
+		Permission: 0x0f,
+		Grantee:    "/user-4",
+	}
+	linkGrants[2] = &Grant{
+		Subject:    "/user-6",
+		Permission: 0x0f,
+		Grantee:    "/user-7",
+	}
+	linkGrants[3] = &Grant{
+		Subject:    "/user-6/sub-project/specific-app",
+		Permission: 0x0f,
+		Grantee:    "/user-5",
+	}
+	trustKey, pool, chain := generateTrustChain(t,
3)
+
+	statements := make([]*Statement, 3)
+	var err error
+	statements[0], err = generateStatement(grants[0:4], trustKey, chain)
+	if err != nil {
+		t.Fatalf("Error generating statement: %s", err)
+	}
+	statements[1], err = generateStatement(grants[4:], trustKey, chain)
+	if err != nil {
+		t.Fatalf("Error generating statement: %s", err)
+	}
+	statements[2], err = generateStatement(linkGrants, trustKey, chain)
+	if err != nil {
+		t.Fatalf("Error generating statement: %s", err)
+	}
+
+	statementsCopy := make([]*Statement, len(statements))
+	for i, statement := range statements {
+		b, err := statement.Bytes()
+		if err != nil {
+			t.Fatalf("Error getting statement bytes: %s", err)
+		}
+		verifiedStatement, err := LoadStatement(bytes.NewReader(b), pool)
+		if err != nil {
+			t.Fatalf("Error loading statement: %s", err)
+		}
+		// Force sort by reversing order
+		statementsCopy[len(statementsCopy)-i-1] = verifiedStatement
+	}
+	statements = statementsCopy
+
+	collapsedGrants, expiration, err := CollapseStatements(statements, false)
+	if err != nil {
+		t.Fatalf("Error collapsing statements: %s", err)
+	}
+	if len(collapsedGrants) != 12 {
+		t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants))
+	}
+	if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) {
+		t.Fatalf("Unexpected expiration time: %s", expiration.String())
+	}
+	g := NewMemoryGraph(collapsedGrants)
+
+	testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
+	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f)
+	testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f)
+	testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f)
+	testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-5", 0x0f)
+	testVerified(t, g, keys[5].PublicKey(), "user-key-6", "/user-6", 0x0f)
+	testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-7", 0x0f)
+	testVerified(t, g, keys[7].PublicKey(), "user-key-8", "/user-8", 0x0f)
+	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f)
+	testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-project/specific-app", 0x0f)
+	testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f)
+	testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6", 0x0f)
+	testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f)
+	testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project/specific-app", 0x0f)
+
+	testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f)
+	testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-6/sub-project", 0x0f)
+	testNotVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project", 0x0f)
+
+	// Add revocation statement
+	statements = append(statements, &Statement{
+		jsonStatement{
+			IssuedAt:   time.Now(),
+			Expiration: time.Now().Add(testStatementExpiration),
+			Grants:     []*jsonGrant{},
+			Revocations: []*jsonRevocation{
+				&jsonRevocation{
+					Subject:    "/user-1",
+					Revocation: 0x0f,
+					Grantee:    keys[0].KeyID(),
+				},
+				&jsonRevocation{
+					Subject:    "/user-2",
+					Revocation: 0x08,
+					Grantee:    keys[1].KeyID(),
+				},
+				&jsonRevocation{
+					Subject:    "/user-6",
+					Revocation: 0x0f,
+					Grantee:    "/user-7",
+				},
+				&jsonRevocation{
+					Subject:    "/user-9",
+					Revocation: 0x0f,
+					Grantee:    "/user-10",
+				},
+			},
+		},
+		nil,
+	})
+
+	collapsedGrants, expiration, err = CollapseStatements(statements, false)
+	if err != nil {
+		t.Fatalf("Error collapsing statements: %s", err)
+	}
+	if len(collapsedGrants) != 12 {
+		t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12,
len(collapsedGrants)) + } + if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { + t.Fatalf("Unexpected expiration time: %s", expiration.String()) + } + g = NewMemoryGraph(collapsedGrants) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + testNotVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) + + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x07) +} + +func TestFilterStatements(t *testing.T) { + grantCount := 8 + grants, keys := createTestKeysAndGrants(grantCount) + linkGrants := make([]*Grant, 3) + linkGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + linkGrants[1] = &Grant{ + Subject: "/user-5", + Permission: 0x0f, + Grantee: "/user-4", + } + linkGrants[2] = &Grant{ + Subject: "/user-7", + Permission: 0x0f, + Grantee: "/user-6", + } + + trustKey, _, chain := generateTrustChain(t, 3) + + statements := make([]*Statement, 5) + var err error + statements[0], err = generateStatement(grants[0:2], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[1], err = generateStatement(grants[2:4], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[2], err = generateStatement(grants[4:6], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[3], err = generateStatement(grants[6:], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[4], err = generateStatement(linkGrants, trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + collapsed, _, err := CollapseStatements(statements, false) + if err != nil { + t.Fatalf("Error collapsing grants: %s", err) + } + + // Filter 1, all 5 statements + filter1, err := FilterStatements(collapsed) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter1) != 5 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 5, len(filter1)) + } + + // Filter 2, one statement + filter2, err := FilterStatements([]*Grant{collapsed[0]}) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter2) != 1 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 1, len(filter2)) + } + + // Filter 3, 2 statements, from graph lookup + g := NewMemoryGraph(collapsed) + lookupGrants, err := g.GetGrants(keys[1], "/user-3", 0x0f) + if err != nil { + t.Fatalf("Error looking up grants: %s", err) + } + if len(lookupGrants) != 1 { + t.Fatalf("Wrong number of grant chains returned from lookup, expected %d, received %d", 1, len(lookupGrants)) + } + if len(lookupGrants[0]) != 2 { + t.Fatalf("Wrong number of grants looked up, expected %d, received %d", 2, len(lookupGrants[0])) + } + filter3, err := FilterStatements(lookupGrants[0]) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter3) != 2 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 2, len(filter3)) + } + +} + +func TestCreateStatement(t *testing.T) { + grantJSON := bytes.NewReader([]byte(`[ + { + "subject": "/user-2", + "permission": 15, + "grantee": "/user-1" + }, + { + "subject": "/user-7", + "permission": 1, + "grantee": "/user-9" + }, + { + "subject": "/user-3", + "permission": 15, + "grantee": "/user-2" + } +]`)) + revocationJSON := 
bytes.NewReader([]byte(`[ + { + "subject": "user-8", + "revocation": 12, + "grantee": "user-9" + } +]`)) + + trustKey, pool, chain := generateTrustChain(t, 3) + + statement, err := CreateStatement(grantJSON, revocationJSON, testStatementExpiration, trustKey, chain) + if err != nil { + t.Fatalf("Error creating statement: %s", err) + } + + b, err := statement.Bytes() + if err != nil { + t.Fatalf("Error retrieving bytes: %s", err) + } + + verified, err := LoadStatement(bytes.NewReader(b), pool) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + + if len(verified.Grants) != 3 { + t.Errorf("Unexpected number of grants, expected %d, received %d", 3, len(verified.Grants)) + } + + if len(verified.Revocations) != 1 { + t.Errorf("Unexpected number of revocations, expected %d, received %d", 1, len(verified.Revocations)) + } +} diff --git a/tests/_vendor/src/github.com/docker/libtrust/util.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/util.go similarity index 91% rename from tests/_vendor/src/github.com/docker/libtrust/util.go rename to tests/Godeps/_workspace/src/github.com/docker/libtrust/util.go index 3b2fac95b1..4d5a6200a8 100644 --- a/tests/_vendor/src/github.com/docker/libtrust/util.go +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/util.go @@ -2,6 +2,7 @@ package libtrust import ( "bytes" + "crypto" "crypto/elliptic" "crypto/x509" "encoding/base32" @@ -52,6 +53,21 @@ func keyIDEncode(b []byte) string { return buf.String() } +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + func stringFromMap(m map[string]interface{}, key string) (string, error) { val, ok := m[key] if !ok { diff --git a/tests/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go b/tests/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go new file mode 100644 index 0000000000..ee54f5b8cc --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go @@ -0,0 +1,23 @@ +package libtrust + +import ( + "encoding/pem" + "reflect" + "testing" +) + +func TestAddPEMHeadersToKey(t *testing.T) { + pk := &rsaPublicKey{nil, map[string]interface{}{}} + blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}} + addPEMHeadersToKey(blk, pk) + + val := pk.GetExtendedField("hosts") + hosts, ok := val.([]string) + if !ok { + t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val)) + } + expected := []string{"localhost", "127.0.0.1"} + if !reflect.DeepEqual(hosts, expected) { + t.Errorf("hosts(%v), expected %v", hosts, expected) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/tests/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml new file mode 100644 index 0000000000..d87d465768 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/context/LICENSE b/tests/Godeps/_workspace/src/github.com/gorilla/context/LICENSE new file mode 100644 index 
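Taken together, the libtrust tests above exercise the whole statement lifecycle: grants are signed into a statement with a leaf key and its certificate chain, serialized, verified against a CA pool, and collapsed into a permission graph. A condensed sketch of that flow, reusing the fixtures defined in the tests above (generateTrustChain, createTestKeysAndGrants, and generateStatement are test helpers, so this is illustrative rather than a production recipe):

    trustKey, caPool, chain := generateTrustChain(t, 3)
    grants, _ := createTestKeysAndGrants(4)

    // Sign the grants into a statement using the leaf key and chain.
    statement, err := generateStatement(grants, trustKey, chain)
    if err != nil {
        t.Fatalf("Error generating statement: %s", err)
    }

    // Serialize, then verify the signature against the CA pool.
    b, _ := statement.Bytes()
    verified, err := LoadStatement(bytes.NewReader(b), caPool)
    if err != nil {
        t.Fatalf("Error loading statement: %s", err)
    }

    // Collapse verified statements into a flat grant list and build an
    // in-memory graph for permission checks.
    collapsed, _, _ := CollapseStatements([]*Statement{verified}, false)
    g := NewMemoryGraph(collapsed)
    _ = g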
0000000000..0e5fb87280 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/context/README.md b/tests/Godeps/_workspace/src/github.com/gorilla/context/README.md new file mode 100644 index 0000000000..c60a31b053 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/context/context.go b/tests/Godeps/_workspace/src/github.com/gorilla/context/context.go new file mode 100644 index 0000000000..81cb128b19 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. 
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. +func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. +// It returns the number of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used as a sanity check: if context cleaning was not +// properly set up, some request data can be kept forever, consuming an +// increasing amount of memory. If this is detected, Purge() must be called +// periodically until the problem is fixed. +func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/context/context_test.go b/tests/Godeps/_workspace/src/github.com/gorilla/context/context_test.go new file mode 100644 index 0000000000..6ada8ec31f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/context/context_test.go @@ -0,0 +1,161 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
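For orientation, a minimal sketch of how the vendored gorilla/context package is typically wired up; the key type and handler below are hypothetical, but Set, GetOk, and ClearHandler are the functions shown above:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/context"
    )

    type ctxKey int

    const requestIDKey ctxKey = 0

    func handler(w http.ResponseWriter, r *http.Request) {
        // Values are keyed off the *http.Request pointer.
        context.Set(r, requestIDKey, "abc-123")
        if v, ok := context.GetOk(r, requestIDKey); ok {
            fmt.Fprintln(w, v)
        }
    }

    func main() {
        // ClearHandler deletes the request's entries once it is served,
        // so the package-level data map cannot grow without bound.
        http.Handle("/", context.ClearHandler(http.HandlerFunc(handler)))
        http.ListenAndServe(":8080", nil)
    }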
+ +package context + +import ( + "net/http" + "testing" +) + +type keyType int + +const ( + key1 keyType = iota + key2 +) + +func TestContext(t *testing.T) { + assertEqual := func(val interface{}, exp interface{}) { + if val != exp { + t.Errorf("Expected %v, got %v.", exp, val) + } + } + + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + + // Get() + assertEqual(Get(r, key1), nil) + + // Set() + Set(r, key1, "1") + assertEqual(Get(r, key1), "1") + assertEqual(len(data[r]), 1) + + Set(r, key2, "2") + assertEqual(Get(r, key2), "2") + assertEqual(len(data[r]), 2) + + //GetOk + value, ok := GetOk(r, key1) + assertEqual(value, "1") + assertEqual(ok, true) + + value, ok = GetOk(r, "not exists") + assertEqual(value, nil) + assertEqual(ok, false) + + Set(r, "nil value", nil) + value, ok = GetOk(r, "nil value") + assertEqual(value, nil) + assertEqual(ok, true) + + // GetAll() + values := GetAll(r) + assertEqual(len(values), 3) + + // GetAll() for empty request + values = GetAll(emptyR) + if values != nil { + t.Error("GetAll didn't return nil value for invalid request") + } + + // GetAllOk() + values, ok = GetAllOk(r) + assertEqual(len(values), 3) + assertEqual(ok, true) + + // GetAllOk() for empty request + values, ok = GetAllOk(emptyR) + assertEqual(len(values), 0) + assertEqual(ok, false) + + // Delete() + Delete(r, key1) + assertEqual(Get(r, key1), nil) + assertEqual(len(data[r]), 2) + + Delete(r, key2) + assertEqual(Get(r, key2), nil) + assertEqual(len(data[r]), 1) + + // Clear() + Clear(r) + assertEqual(len(data), 0) +} + +func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Get(r, key) + } + done <- struct{}{} + +} + +func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Set(r, key, value) + } + done <- struct{}{} + +} + +func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { + + b.StopTimer() + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + done := make(chan struct{}) + b.StartTimer() + + for i := 0; i < b.N; i++ { + wait := make(chan struct{}) + + for i := 0; i < numReaders; i++ { + go parallelReader(r, "test", iterations, wait, done) + } + + for i := 0; i < numWriters; i++ { + go parallelWriter(r, "test", "123", iterations, wait, done) + } + + close(wait) + + for i := 0; i < numReaders+numWriters; i++ { + <-done + } + + } + +} + +func BenchmarkMutexSameReadWrite1(b *testing.B) { + benchmarkMutex(b, 1, 1, 32) +} +func BenchmarkMutexSameReadWrite2(b *testing.B) { + benchmarkMutex(b, 2, 2, 32) +} +func BenchmarkMutexSameReadWrite4(b *testing.B) { + benchmarkMutex(b, 4, 4, 32) +} +func BenchmarkMutex1(b *testing.B) { + benchmarkMutex(b, 2, 8, 32) +} +func BenchmarkMutex2(b *testing.B) { + benchmarkMutex(b, 16, 4, 64) +} +func BenchmarkMutex3(b *testing.B) { + benchmarkMutex(b, 1, 2, 128) +} +func BenchmarkMutex4(b *testing.B) { + benchmarkMutex(b, 128, 32, 256) +} +func BenchmarkMutex5(b *testing.B) { + benchmarkMutex(b, 1024, 2048, 64) +} +func BenchmarkMutex6(b *testing.B) { + benchmarkMutex(b, 2048, 1024, 512) +} diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/context/doc.go b/tests/Godeps/_workspace/src/github.com/gorilla/context/doc.go new file mode 100644 index 0000000000..73c7400311 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/context/doc.go @@ -0,0 +1,82 @@ +// 
Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package context stores values shared during a request lifetime. + +For example, a router can set variables extracted from the URL and later +application handlers can access those values, or it can be used to store +session values to be saved at the end of a request. There are several +other common uses. + +The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: + + http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 + +Here's the basic usage: first define the keys that you will need. The key +type is interface{} so a key can be of any type that supports equality. +Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all there is to the basic usage. We discuss some other ideas below. + +Any type can be stored in the context. To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request is +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. + +The Routers from the packages gorilla/mux and gorilla/pat call Clear(), +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml b/tests/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml new file mode 100644 index 0000000000..d87d465768 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE b/tests/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE new file mode 100644 index 0000000000..0e5fb87280 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/README.md b/tests/Godeps/_workspace/src/github.com/gorilla/mux/README.md new file mode 100644 index 0000000000..e60301b033 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/README.md @@ -0,0 +1,7 @@ +mux +=== +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) + +gorilla/mux is a powerful URL router and dispatcher. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go b/tests/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go new file mode 100644 index 0000000000..c5f97b2b2a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "net/http" + "testing" +) + +func BenchmarkMux(b *testing.B) { + router := new(Router) + handler := func(w http.ResponseWriter, r *http.Request) {} + router.HandleFunc("/v1/{v1}", handler) + + request, _ := http.NewRequest("GET", "/v1/anything", nil) + for i := 0; i < b.N; i++ { + router.ServeHTTP(nil, request) + } +} diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/doc.go b/tests/Godeps/_workspace/src/github.com/gorilla/mux/doc.go new file mode 100644 index 0000000000..b2deed34c4 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/doc.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. 
+ * Registered URLs can be built, or "reversed", which helps maintain + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.domain.com". + r.Host("www.domain.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.domain.com"). + Methods("GET"). + Schemes("http") + +Setting the same matching conditions again and again can be boring, so we have +a way to group several routes that share the same requirements. +We call it "subrouting". + +For example, let's say we have several URLs that should only match when the +host is "www.domain.com". Create a route for that host and get a "subrouter" +from it: + + r := mux.NewRouter() + s := r.Host("www.domain.com").Subrouter() + +Then register routes in the subrouter: + + s.HandleFunc("/products/", ProductsHandler) + s.HandleFunc("/products/{key}", ProductHandler) + s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The three URL paths we registered above will only be tested if the domain is +"www.domain.com", because the subrouter is tested first. This is not +only convenient, but also optimizes request matching. 
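To tie the matchers above together, a self-contained sketch of a host-scoped subrouter whose handler reads path variables via mux.Vars (the handler name and domain are placeholders):

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/mux"
    )

    func ArticleHandler(w http.ResponseWriter, r *http.Request) {
        vars := mux.Vars(r)
        fmt.Fprintf(w, "category=%s id=%s", vars["category"], vars["id"])
    }

    func main() {
        r := mux.NewRouter()
        // Routes registered on s are only tested when the host matches.
        s := r.Host("www.domain.com").Subrouter()
        s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
        http.Handle("/", r)
        http.ListenAndServe(":8080", nil)
    }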
You can create +subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define +subrouters in a central place and then parts of the app can register their +paths relative to a given subrouter. + +There's one more thing about subrouters. When a subrouter has a path prefix, +the inner routes use it as a base for their paths: + + r := mux.NewRouter() + s := r.PathPrefix("/products").Subrouter() + // "/products/" + s.HandleFunc("/", ProductsHandler) + // "/products/{key}/" + s.HandleFunc("/{key}/", ProductHandler) + // "/products/{key}/details" + s.HandleFunc("/{key}/details", ProductDetailsHandler) + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +*/ +package mux diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/tests/Godeps/_workspace/src/github.com/gorilla/mux/mux.go new file mode 100644 index 0000000000..5b5f8e7db5 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/mux.go @@ -0,0 +1,353 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "fmt" + "net/http" + "path" + + "github.com/gorilla/context" +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} +} + +// Router registers routes to be matched and dispatches a handler. 
+// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in an init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + // Parent route, if this is a subrouter. + parent parentRoute + // Routes to be matched, in order. + routes []*Route + // Routes by name for URL building. + namedRoutes map[string]*Route + // See Router.StrictSlash(). This defines the flag for new routes. + strictSlash bool + // If true, do not clear the request context after handling the request + KeepContext bool +} + +// Match matches registered routes against the request. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + return true + } + } + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Clean path to canonical form and redirect. + if p := cleanPath(req.URL.Path); p != req.URL.Path { + + // Added 3 lines (Philip Schlump) - It was dropping the query string and fragment from the URL. + // This matches the fix in Go 1.2 rc4 for the same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + setVars(req, match.Vars) + setCurrentRoute(req, match.Route) + } + if handler == nil { + handler = r.NotFoundHandler + if handler == nil { + handler = http.NotFoundHandler() + } + } + if !r.KeepContext { + defer context.Clear(req) + } + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. 
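To make the StrictSlash contract above concrete, a brief sketch (handler is a placeholder http.HandlerFunc):

    r := mux.NewRouter()
    r.StrictSlash(true)
    // With StrictSlash(true), a request for "/path/" is redirected to
    // "/path", so the handler always sees the path as registered.
    r.HandleFunc("/path", handler)

    // Under the default StrictSlash(false), "/path" and "/path/" are
    // distinct: registering one does not match the other.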
+func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) +} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. 
+type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := context.Get(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +func CurrentRoute(r *http.Request) *Route { + if rv := context.Get(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) { + context.Set(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) { + context.Set(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// mapFromPairs converts variadic string parameters to a string map. +func mapFromPairs(pairs ...string) (map[string]string, error) { + length := len(pairs) + if length%2 != 0 { + return nil, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMap returns true if the given key/value pairs exist in a given map. +func matchMap(toCheck map[string]string, toMatch map[string][]string, + canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go b/tests/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go new file mode 100644 index 0000000000..e455bce8fd --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go @@ -0,0 +1,943 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
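Router.Match and RouteMatch, defined above, also allow matching outside of ServeHTTP, which several of the tests below use directly. A condensed sketch, assuming r is a *mux.Router with a named, variable-bearing route already registered:

    req, _ := http.NewRequest("GET", "http://localhost/articles/technology/42", nil)
    var match mux.RouteMatch
    if r.Match(req, &match) {
        // match.Route, match.Handler and match.Vars are populated exactly
        // as they would be during ServeHTTP dispatch.
        fmt.Println(match.Route.GetName(), match.Vars["category"], match.Vars["id"])
    }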
+ +package mux + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gorilla/context" +) + +type routeTest struct { + title string // title of the test + route *Route // the route being tested + request *http.Request // a request to test the route + vars map[string]string // the expected vars of the match + host string // the expected host of the match + path string // the expected path of the match + shouldMatch bool // whether the request is expected to match the route at all + shouldRedirect bool // whether the request should result in a redirect +} + +func TestHost(t *testing.T) { + // newRequestHost creates a new request with a method, url, and host header + newRequestHost := func(method, url, host string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + req.Host = host + return req + } + + tests := []routeTest{ + { + title: "Host route match", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with port, match", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: true, + }, + { + title: "Host route with port, wrong port in request URL", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route, match with host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, + { + title: "Host route with port, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with pattern, wrong host in request URL", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", 
"http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with multiple patterns, wrong host in request URL", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPath(t *testing.T) { + tests := []routeTest{ + { + title: "Path route, match", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route, match with trailing slash in request and path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + }, + { + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: false, + }, + { + title: "Path route, wrong path in request in request URL", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with pattern, match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with pattern, URL in request does not match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with multiple patterns, match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns, URL in request does not match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPathPrefix(t *testing.T) { + tests := []routeTest{ + { + title: "PathPrefix route, match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + }, + { + title: "PathPrefix route, match substring", + route: new(Route).PathPrefix("/1"), + request: newRequest("GET", 
"http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/1", + shouldMatch: true, + }, + { + title: "PathPrefix route, URL prefix in request does not match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "PathPrefix route with pattern, match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with pattern, URL prefix in request does not match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + { + title: "PathPrefix route with multiple patterns, match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with multiple patterns, URL prefix in request does not match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHostPath(t *testing.T) { + tests := []routeTest{ + { + title: "Host and Path route, match", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Host and Path route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Host and Path route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with pattern, URL in request does not match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Host and Path route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with multiple patterns, URL in request does not match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: 
newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHeaders(t *testing.T) { + // newRequestHeaders creates a new request with a method, url, and headers + newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + for k, v := range headers { + req.Header.Add(k, v) + } + return req + } + + tests := []routeTest{ + { + title: "Headers route, match", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Headers route, bad header values", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } + +} + +func TestMethods(t *testing.T) { + tests := []routeTest{ + { + title: "Methods route, match GET", + route: new(Route).Methods("GET", "POST"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, match POST", + route: new(Route).Methods("GET", "POST"), + request: newRequest("POST", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, bad method", + route: new(Route).Methods("GET", "POST"), + request: newRequest("PUT", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestQueries(t *testing.T) { + tests := []routeTest{ + { + title: "Queries route, match", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string out of order", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, bad query", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with pattern, match", + route: new(Route).Queries("foo", "{v1}"), + request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple 
patterns, match", + route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=a"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSchemes(t *testing.T) { + tests := []routeTest{ + // Schemes + { + title: "Schemes route, match https", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "https://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, match ftp", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "ftp://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, bad scheme", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestMatcherFunc(t *testing.T) { + m := func(r *http.Request, m *RouteMatch) bool { + if r.URL.Host == "aaa.bbb.ccc" { + return true + } + return false + } + + tests := []routeTest{ + { + title: "MatchFunc route, match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.bbb.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "MatchFunc route, non-match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.222.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSubRouter(t *testing.T) { + subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() + subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() + + tests := []routeTest{ + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://aaa.google.com/bbb"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: true, + }, + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://111.google.com/111"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: false, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: true, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestNamedRoutes(t *testing.T) { + r1 := NewRouter() + r1.NewRoute().Name("a") + 
r1.NewRoute().Name("b") + r1.NewRoute().Name("c") + + r2 := r1.NewRoute().Subrouter() + r2.NewRoute().Name("d") + r2.NewRoute().Name("e") + r2.NewRoute().Name("f") + + r3 := r2.NewRoute().Subrouter() + r3.NewRoute().Name("g") + r3.NewRoute().Name("h") + r3.NewRoute().Name("i") + + if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { + t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) + } else if r1.Get("i") == nil { + t.Errorf("Subroute name not registered") + } +} + +func TestStrictSlash(t *testing.T) { + r := NewRouter() + r.StrictSlash(true) + + tests := []routeTest{ + { + title: "Redirect path without slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path with slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Redirect path with slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path without slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Propagate StrictSlash to subrouters", + route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), + request: newRequest("GET", "http://localhost/static/images"), + vars: map[string]string{}, + host: "", + path: "/static/images/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Ignore StrictSlash for path prefix", + route: r.NewRoute().PathPrefix("/static/"), + request: newRequest("GET", "http://localhost/static/logo.png"), + vars: map[string]string{}, + host: "", + path: "/static/", + shouldMatch: true, + shouldRedirect: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +func getRouteTemplate(route *Route) string { + host, path := "none", "none" + if route.regexp != nil { + if route.regexp.host != nil { + host = route.regexp.host.template + } + if route.regexp.path != nil { + path = route.regexp.path.template + } + } + return fmt.Sprintf("Host: %v, Path: %v", host, path) +} + +func testRoute(t *testing.T, test routeTest) { + request := test.request + route := test.route + vars := test.vars + shouldMatch := test.shouldMatch + host := test.host + path := test.path + url := test.host + test.path + shouldRedirect := test.shouldRedirect + + var match RouteMatch + ok := route.Match(request, &match) + if ok != shouldMatch { + msg := "Should match" + if !shouldMatch { + msg = "Should not match" + } + t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) + return + } + if shouldMatch { + if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { + t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) + return + } + if host != "" { + u, _ := test.route.URLHost(mapToPairs(match.Vars)...) 
+ if host != u.Host { + t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) + return + } + } + if path != "" { + u, _ := route.URLPath(mapToPairs(match.Vars)...) + if path != u.Path { + t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) + return + } + } + if url != "" { + u, _ := route.URL(mapToPairs(match.Vars)...) + if url != u.Host+u.Path { + t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) + return + } + } + if shouldRedirect && match.Handler == nil { + t.Errorf("(%v) Did not redirect", test.title) + return + } + if !shouldRedirect && match.Handler != nil { + t.Errorf("(%v) Unexpected redirect", test.title) + return + } + } +} + +// Tests that the context is cleared or not cleared properly depending on +// the configuration of the router +func TestKeepContext(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + res := new(http.ResponseWriter) + r.ServeHTTP(*res, req) + + if _, ok := context.GetOk(req, "t"); ok { + t.Error("Context should have been cleared at end of request") + } + + r.KeepContext = true + + req, _ = http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + r.ServeHTTP(*res, req) + if _, ok := context.GetOk(req, "t"); !ok { + t.Error("Context should NOT have been cleared at end of request") + } + +} + +type TestA301ResponseWriter struct { + hh http.Header + status int +} + +func (ho TestA301ResponseWriter) Header() http.Header { + return http.Header(ho.hh) +} + +func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { + return 0, nil +} + +func (ho TestA301ResponseWriter) WriteHeader(code int) { + ho.status = code +} + +func Test301Redirect(t *testing.T) { + m := make(http.Header) + + func1 := func(w http.ResponseWriter, r *http.Request) {} + func2 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/api/", func2).Name("func2") + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) + + res := TestA301ResponseWriter{ + hh: m, + status: 0, + } + r.ServeHTTP(&res, req) + + if "http://localhost/api/?abc=def" != res.hh["Location"][0] { + t.Errorf("Should have complete URL with query string") + } +} + +// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW +func TestSubrouterHeader(t *testing.T) { + expected := "func1 response" + func1 := func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, expected) + } + func2 := func(http.ResponseWriter, *http.Request) {} + + r := NewRouter() + s := r.Headers("SomeSpecialHeader", "").Subrouter() + s.HandleFunc("/", func1).Name("func1") + r.HandleFunc("/", func2).Name("func2") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + req.Header.Add("SomeSpecialHeader", "foo") + match := new(RouteMatch) + matched := r.Match(req, match) + if !matched { + t.Errorf("Should match request") + } + if match.Route.GetName() != "func1" { + t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) + } + resp := NewRecorder() + match.Handler.ServeHTTP(resp, req) + if resp.Body.String() != expected { + t.Errorf("Expecting %q", expected) + } +} + +// mapToPairs converts a string map to a slice of string pairs +func mapToPairs(m 
map[string]string) []string { + var i int + p := make([]string, len(m)*2) + for k, v := range m { + p[i] = k + p[i+1] = v + i += 2 + } + return p +} + +// stringMapEqual checks the equality of two string maps +func stringMapEqual(m1, m2 map[string]string) bool { + nil1 := m1 == nil + nil2 := m2 == nil + if nil1 != nil2 || len(m1) != len(m2) { + return false + } + for k, v := range m1 { + if v != m2[k] { + return false + } + } + return true +} + +// newRequest is a helper function to create a new request with a method and url +func newRequest(method, url string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + return req +} diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go b/tests/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go new file mode 100644 index 0000000000..1f7c190c0f --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go @@ -0,0 +1,714 @@ +// Old tests ported to Go1. This is a mess. Want to drop it one day. + +// Copyright 2011 Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "net/http" + "testing" +) + +// ---------------------------------------------------------------------------- +// ResponseRecorder +// ---------------------------------------------------------------------------- +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. +type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool +} + +// NewRecorder returns an initialized ResponseRecorder. +func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + return rw.HeaderMap +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + if rw.Body != nil { + rw.Body.Write(buf) + } + if rw.Code == 0 { + rw.Code = http.StatusOK + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + rw.Code = code +} + +// Flush sets rw.Flushed to true. +func (rw *ResponseRecorder) Flush() { + rw.Flushed = true +} + +// ---------------------------------------------------------------------------- + +func TestRouteMatchers(t *testing.T) { + var scheme, host, path, query, method string + var headers map[string]string + var resultVars map[bool]map[string]string + + router := NewRouter() + router.NewRoute().Host("{var1}.google.com"). + Path("/{var2:[a-z]+}/{var3:[0-9]+}"). + Queries("foo", "bar"). + Methods("GET"). + Schemes("https"). + Headers("x-requested-with", "XMLHttpRequest") + router.NewRoute().Host("www.{var4}.com"). + PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). + Queries("baz", "ding"). + Methods("POST"). 
+ Schemes("http"). + Headers("Content-Type", "application/json") + + reset := func() { + // Everything match. + scheme = "https" + host = "www.google.com" + path = "/product/42" + query = "?foo=bar" + method = "GET" + headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} + resultVars = map[bool]map[string]string{ + true: {"var1": "www", "var2": "product", "var3": "42"}, + false: {}, + } + } + + reset2 := func() { + // Everything match. + scheme = "http" + host = "www.google.com" + path = "/foo/product/42/path/that/is/ignored" + query = "?baz=ding" + method = "POST" + headers = map[string]string{"Content-Type": "application/json"} + resultVars = map[bool]map[string]string{ + true: {"var4": "google", "var5": "product", "var6": "42"}, + false: {}, + } + } + + match := func(shouldMatch bool) { + url := scheme + "://" + host + path + query + request, _ := http.NewRequest(method, url, nil) + for key, value := range headers { + request.Header.Add(key, value) + } + + var routeMatch RouteMatch + matched := router.Match(request, &routeMatch) + if matched != shouldMatch { + // Need better messages. :) + if matched { + t.Errorf("Should match.") + } else { + t.Errorf("Should not match.") + } + } + + if matched { + currentRoute := routeMatch.Route + if currentRoute == nil { + t.Errorf("Expected a current route.") + } + vars := routeMatch.Vars + expectedVars := resultVars[shouldMatch] + if len(vars) != len(expectedVars) { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + for name, value := range vars { + if expectedVars[name] != value { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + } + } + } + + // 1st route -------------------------------------------------------------- + + // Everything match. + reset() + match(true) + + // Scheme doesn't match. + reset() + scheme = "http" + match(false) + + // Host doesn't match. + reset() + host = "www.mygoogle.com" + match(false) + + // Path doesn't match. + reset() + path = "/product/notdigits" + match(false) + + // Query doesn't match. + reset() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset() + method = "POST" + match(false) + + // Header doesn't match. + reset() + headers = map[string]string{} + match(false) + + // Everything match, again. + reset() + match(true) + + // 2nd route -------------------------------------------------------------- + + // Everything match. + reset2() + match(true) + + // Scheme doesn't match. + reset2() + scheme = "https" + match(false) + + // Host doesn't match. + reset2() + host = "sub.google.com" + match(false) + + // Path doesn't match. + reset2() + path = "/bar/product/42" + match(false) + + // Query doesn't match. + reset2() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset2() + method = "GET" + match(false) + + // Header doesn't match. + reset2() + headers = map[string]string{} + match(false) + + // Everything match, again. 
+ reset2() + match(true) +} + +type headerMatcherTest struct { + matcher headerMatcher + headers map[string]string + result bool +} + +var headerMatcherTests = []headerMatcherTest{ + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": ""}), + headers: map[string]string{"X-Requested-With": "anything"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{}, + result: false, + }, +} + +type hostMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var hostMatcherTests = []hostMatcherTest{ + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://abc.def.ghi/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://a.b.c/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: false, + }, +} + +type methodMatcherTest struct { + matcher methodMatcher + method string + result bool +} + +var methodMatcherTests = []methodMatcherTest{ + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "GET", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "POST", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "PUT", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "DELETE", + result: false, + }, +} + +type pathMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var pathMatcherTests = []pathMatcherTest{ + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/123/456/789", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/1/2/3", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: false, + }, +} + +type schemeMatcherTest struct { + matcher schemeMatcher + url string + result bool +} + +var schemeMatcherTests = []schemeMatcherTest{ + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "http://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "https://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"https"}), + url: "http://localhost:8080/", + result: false, + }, + { + matcher: schemeMatcher([]string{"http"}), + url: "https://localhost:8080/", + result: false, + }, +} + +type urlBuildingTest struct { + route *Route + vars []string + url string +} + +var urlBuildingTests = []urlBuildingTest{ + { + route: new(Route).Host("foo.domain.com"), + vars: []string{}, + url: "http://foo.domain.com", + }, + { + route: new(Route).Host("{subdomain}.domain.com"), + vars: []string{"subdomain", "bar"}, + url: "http://bar.domain.com", + }, + { + route: new(Route).Host("foo.domain.com").Path("/articles"), + vars: []string{}, + url: "http://foo.domain.com/articles", + }, + { + route: 
new(Route).Path("/articles"), + vars: []string{}, + url: "/articles", + }, + { + route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"category", "technology", "id", "42"}, + url: "/articles/technology/42", + }, + { + route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, + url: "http://foo.domain.com/articles/technology/42", + }, +} + +func TestHeaderMatcher(t *testing.T) { + for _, v := range headerMatcherTests { + request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + for key, value := range v.headers { + request.Header.Add(key, value) + } + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, request.Header) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, request.Header) + } + } + } +} + +func TestHostMatcher(t *testing.T) { + for _, v := range hostMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestMethodMatcher(t *testing.T) { + for _, v := range methodMatcherTests { + request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.method) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.method) + } + } + } +} + +func TestPathMatcher(t *testing.T) { + for _, v := range pathMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestSchemeMatcher(t *testing.T) { + for _, v := range schemeMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + } +} + +func TestUrlBuilding(t *testing.T) { + + for _, v := range urlBuildingTests { + u, _ := 
v.route.URL(v.vars...) + url := u.String() + if url != v.url { + t.Errorf("expected %v, got %v", v.url, url) + /* + reversePath := "" + reverseHost := "" + if v.route.pathTemplate != nil { + reversePath = v.route.pathTemplate.Reverse + } + if v.route.hostTemplate != nil { + reverseHost = v.route.hostTemplate.Reverse + } + + t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) + */ + } + } + + ArticleHandler := func(w http.ResponseWriter, r *http.Request) { + } + + router := NewRouter() + router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") + + url, _ := router.Get("article").URL("category", "technology", "id", "42") + expected := "/articles/technology/42" + if url.String() != expected { + t.Errorf("Expected %v, got %v", expected, url.String()) + } +} + +func TestMatchedRouteName(t *testing.T) { + routeName := "stock" + router := NewRouter() + route := router.NewRoute().Path("/products/").Name(routeName) + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + retName := rv.Route.GetName() + if retName != routeName { + t.Errorf("Expected %q, got %q.", routeName, retName) + } +} + +func TestSubRouting(t *testing.T) { + // Example from docs. + router := NewRouter() + subrouter := router.NewRoute().Host("www.domain.com").Subrouter() + route := subrouter.NewRoute().Path("/products/").Name("products") + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + u, _ := router.Get("products").URL() + builtUrl := u.String() + // Yay, subroute aware of the domain when building! 
+ if builtUrl != url { + t.Errorf("Expected %q, got %q.", url, builtUrl) + } +} + +func TestVariableNames(t *testing.T) { + route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") + if route.err == nil { + t.Errorf("Expected error for duplicated variable names") + } +} + +func TestRedirectSlash(t *testing.T) { + var route *Route + var routeMatch RouteMatch + r := NewRouter() + + r.StrictSlash(false) + route = r.NewRoute() + if route.strictSlash != false { + t.Errorf("Expected false redirectSlash.") + } + + r.StrictSlash(true) + route = r.NewRoute() + if route.strictSlash != true { + t.Errorf("Expected true redirectSlash.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}/") + request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars := routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp := NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { + t.Errorf("Expected redirect header.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}") + request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars = routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp = NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { + t.Errorf("Expected redirect header.") + } +} + +// Test for the new regexp library, still not available in stable Go. +func TestNewRegexp(t *testing.T) { + var p *routeRegexp + var matches []string + + tests := map[string]map[string][]string{ + "/{foo:a{2}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": nil, + "/aaaa": nil, + }, + "/{foo:a{2,}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": {"aaaa"}, + }, + "/{foo:a{2,3}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": nil, + }, + "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abcd": nil, + "/abc/ab": {"abc", "ab"}, + "/abc/abc": nil, + "/abcd/ab": nil, + }, + `/{foo:\w{3,}}/{bar:\d{2,}}`: { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abc/1": nil, + "/abc/12": {"abc", "12"}, + "/abcd/12": {"abcd", "12"}, + "/abcd/123": {"abcd", "123"}, + }, + } + + for pattern, paths := range tests { + p, _ = newRouteRegexp(pattern, false, false, false, false) + for path, result := range paths { + matches = p.regexp.FindStringSubmatch(path) + if result == nil { + if matches != nil { + t.Errorf("%v should not match %v.", pattern, path) + } + } else { + if len(matches) != len(result)+1 { + t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) + } else { + for k, v := range result { + if matches[k+1] != v { + t.Errorf("Expected %v, got %v.", v, matches[k+1]) + } + } + } + } + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/tests/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go new file mode 100644 index 0000000000..a6305483d5 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go @@ -0,0 +1,276 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if matchQuery { + defaultPattern = "[^?&]+" + matchPrefix = true + } else if matchHost { + defaultPattern = "[^.]+" + matchPrefix = false + } + // strictSlash applies only when matching a full path, so disable it + // for prefix, host and query matchers. + if matchPrefix || matchHost || matchQuery { + strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + if !matchQuery { + pattern.WriteByte('^') + } + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if strictSlash { + pattern.WriteString("[/]?") + } + if !matchPrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + // Done! + return &routeRegexp{ + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // True for host match, false for path or query string match. + matchHost bool + // True for query string match, false for path and host match. + matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used.
+ strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.regexp.MatchString(req.URL.RawQuery) + } else { + return r.regexp.MatchString(req.URL.Path) + } + } + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(pairs ...string) (string, error) { + values, err := mapFromPairs(pairs...) + if err != nil { + return "", err + } + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) + if hostVars != nil { + for k, v := range v.host.varsN { + m.Vars[v] = hostVars[k+1] + } + } + } + // Store path variables. + if v.path != nil { + pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) + if pathVars != nil { + for k, v := range v.path.varsN { + m.Vars[v] = pathVars[k+1] + } + // Check if we should redirect. + if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. 
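+ // Each query regexp was compiled with matchQuery set (see newRouteRegexp), + // so it is matched against the raw query string rather than parsed values.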
+ rawQuery := req.URL.RawQuery + for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(rawQuery) + if queryVars != nil { + for k, v := range q.varsN { + m.Vars[v] = queryVars[k+1] + } + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host +} diff --git a/tests/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/tests/Godeps/_workspace/src/github.com/gorilla/mux/route.go new file mode 100644 index 0000000000..c310e66bc7 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/gorilla/mux/route.go @@ -0,0 +1,524 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. + handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulting from building the route. + err error +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns the error resulting from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs.
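+// Named routes can be retrieved later using Router.Get().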
+// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. +func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { + if r.err != nil { + return r.err + } + r.regexp = r.getRegexpGroup() + if !matchHost && !matchQuery { + if len(tpl) == 0 || tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if matchHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if matchQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMap(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// +// If the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairs(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.domain.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route.
They can be retrieved +// by calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, true, false, false) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". +func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// by calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, false, false) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, true, false) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined query +// values, e.g.: ?foo=bar&id=42. +// +// If the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next '&' or '?'. +// +// - {name:pattern} matches the given regexp pattern.
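+// +// Query variables are stored together with host and path variables and can +// be retrieved by calling mux.Vars(request).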
+func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.URL.Scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. For example: +// +// r := mux.NewRouter() +// s := r.Host("www.domain.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + router := &Router{parent: r, strictSlash: r.strictSlash} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return a url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com"). +// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. +func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + var scheme, host, path string + var err error + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(pairs...); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(pairs...); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined.
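+// +// A hypothetical example in the style of Route.URL() above (the route name +// "home" and value "news" are illustrative only): +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com").Name("home") +// +// // url.String() will be "http://news.domain.com" +// url, err := r.Get("home").URLHost("subdomain", "news")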
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + host, err := r.regexp.host.url(pairs...) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + path, err := r.regexp.path.url(pairs...) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. + r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/tests/_vendor/src/github.com/kballard/go-shellquote/LICENSE b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/LICENSE similarity index 100% rename from tests/_vendor/src/github.com/kballard/go-shellquote/LICENSE rename to tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/LICENSE diff --git a/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/README b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/README new file mode 100644 index 0000000000..4d34e87afc --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/README @@ -0,0 +1,36 @@ +PACKAGE + +package shellquote + import "github.com/kballard/go-shellquote" + + Shellquote provides utilities for joining/splitting strings using sh's + word-splitting rules. + +VARIABLES + +var ( + UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") + UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") + UnterminatedEscapeError = errors.New("Unterminated backslash-escape") +) + + +FUNCTIONS + +func Join(args ...string) string + Join quotes each argument and joins them with a space. If passed to + /bin/sh, the resulting string will be split back into the original + arguments. + +func Split(input string) (words []string, err error) + Split splits a string according to /bin/sh's word-splitting rules. It + supports backslash-escapes, single-quotes, and double-quotes. Notably it + does not support the $'' style of quoting. 
It also doesn't attempt to + perform any other sort of expansion, including brace expansion, shell + expansion, or pathname expansion. + + If the given input has an unterminated quoted string or ends in a + backslash-escape, one of UnterminatedSingleQuoteError, + UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. + + diff --git a/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/both_test.go b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/both_test.go new file mode 100644 index 0000000000..9cba3c8491 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/both_test.go @@ -0,0 +1,29 @@ +package shellquote + +import ( + "reflect" + "testing" + "testing/quick" +) + +// this is called bothtest because it tests Split and Join together + +func TestJoinSplit(t *testing.T) { + f := func(strs []string) bool { + // Join, then split, the input + combined := Join(strs...) + split, err := Split(combined) + if err != nil { + t.Logf("Error splitting %#v: %v", combined, err) + return false + } + if !reflect.DeepEqual(strs, split) { + t.Logf("Input %q did not match output %q", strs, split) + return false + } + return true + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } +} diff --git a/tests/_vendor/src/github.com/kballard/go-shellquote/doc.go b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/doc.go similarity index 100% rename from tests/_vendor/src/github.com/kballard/go-shellquote/doc.go rename to tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/doc.go diff --git a/tests/_vendor/src/github.com/kballard/go-shellquote/quote.go b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/quote.go similarity index 100% rename from tests/_vendor/src/github.com/kballard/go-shellquote/quote.go rename to tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/quote.go diff --git a/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/quote_test.go b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/quote_test.go new file mode 100644 index 0000000000..a4d2d82fb2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/quote_test.go @@ -0,0 +1,28 @@ +package shellquote + +import ( + "testing" +) + +func TestSimpleJoin(t *testing.T) { + for _, elem := range simpleJoinTest { + output := Join(elem.input...) 
+ if output != elem.output { + t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output) + } + } +} + +var simpleJoinTest = []struct { + input []string + output string +}{ + {[]string{"test"}, "test"}, + {[]string{"hello goodbye"}, "'hello goodbye'"}, + {[]string{"hello", "goodbye"}, "hello goodbye"}, + {[]string{"don't you know the dewey decimal system?"}, "'don'\\''t you know the dewey decimal system?'"}, + {[]string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}, "don\\'t you know the dewey decimal system\\?"}, + {[]string{"~user", "u~ser", " ~user", "!~user"}, "\\~user u~ser ' ~user' \\!~user"}, + {[]string{"foo*", "M{ovies,usic}", "ab[cd]", "%3"}, "foo\\* M\\{ovies,usic} ab\\[cd] %3"}, + {[]string{"one", "", "three"}, "one '' three"}, +} diff --git a/tests/_vendor/src/github.com/kballard/go-shellquote/unquote.go b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/unquote.go similarity index 100% rename from tests/_vendor/src/github.com/kballard/go-shellquote/unquote.go rename to tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/unquote.go diff --git a/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/unquote_test.go b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/unquote_test.go new file mode 100644 index 0000000000..1444a9f98a --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/kballard/go-shellquote/unquote_test.go @@ -0,0 +1,53 @@ +package shellquote + +import ( + "reflect" + "testing" +) + +func TestSimpleSplit(t *testing.T) { + for _, elem := range simpleSplitTest { + output, err := Split(elem.input) + if err != nil { + t.Errorf("Input %q, got error %#v", elem.input, err) + } else if !reflect.DeepEqual(output, elem.output) { + t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output) + } + } +} + +func TestErrorSplit(t *testing.T) { + for _, elem := range errorSplitTest { + _, err := Split(elem.input) + if err != elem.error { + t.Errorf("Input %q, got error %#v, expected error %#v", elem.input, err, elem.error) + } + } +} + +var simpleSplitTest = []struct { + input string + output []string +}{ + {"hello", []string{"hello"}}, + {"hello goodbye", []string{"hello", "goodbye"}}, + {"hello goodbye", []string{"hello", "goodbye"}}, + {"glob* test?", []string{"glob*", "test?"}}, + {"don\\'t you know the dewey decimal system\\?", []string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}}, + {"'don'\\''t you know the dewey decimal system?'", []string{"don't you know the dewey decimal system?"}}, + {"one '' two", []string{"one", "", "two"}}, + {"text with\\\na backslash-escaped newline", []string{"text", "witha", "backslash-escaped", "newline"}}, + {"text \"with\na\" quoted newline", []string{"text", "with\na", "quoted", "newline"}}, + {"\"quoted\\d\\\\\\\" text with\\\na backslash-escaped newline\"", []string{"quoted\\d\\\" text witha backslash-escaped newline"}}, + {"foo\"bar\"baz", []string{"foobarbaz"}}, +} + +var errorSplitTest = []struct { + input string + error error +}{ + {"don't worry", UnterminatedSingleQuoteError}, + {"'test'\\''ing", UnterminatedSingleQuoteError}, + {"\"foo'bar", UnterminatedDoubleQuoteError}, + {"foo\\", UnterminatedEscapeError}, +} diff --git a/tests/Godeps/_workspace/src/github.com/kr/pty/.gitignore b/tests/Godeps/_workspace/src/github.com/kr/pty/.gitignore new file mode 100644 index 0000000000..1f0a99f2f2 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/kr/pty/.gitignore @@ -0,0 +1,4 @@ +[568].out +_go* +_test* +_obj diff --git 
a/tests/_vendor/src/github.com/kr/pty/License b/tests/Godeps/_workspace/src/github.com/kr/pty/License similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/License rename to tests/Godeps/_workspace/src/github.com/kr/pty/License diff --git a/tests/Godeps/_workspace/src/github.com/kr/pty/README.md b/tests/Godeps/_workspace/src/github.com/kr/pty/README.md new file mode 100644 index 0000000000..7b7900c3ae --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/kr/pty/README.md @@ -0,0 +1,36 @@ +# pty + +Pty is a Go package for using unix pseudo-terminals. + +## Install + + go get github.com/kr/pty + +## Example + +```go +package main + +import ( + "github.com/kr/pty" + "io" + "os" + "os/exec" +) + +func main() { + c := exec.Command("grep", "--color=auto", "bar") + f, err := pty.Start(c) + if err != nil { + panic(err) + } + + go func() { + f.Write([]byte("foo\n")) + f.Write([]byte("bar\n")) + f.Write([]byte("baz\n")) + f.Write([]byte{4}) // EOT + }() + io.Copy(os.Stdout, f) +} +``` diff --git a/tests/_vendor/src/github.com/kr/pty/doc.go b/tests/Godeps/_workspace/src/github.com/kr/pty/doc.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/doc.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/doc.go diff --git a/tests/_vendor/src/github.com/kr/pty/ioctl.go b/tests/Godeps/_workspace/src/github.com/kr/pty/ioctl.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/ioctl.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/ioctl.go diff --git a/tests/_vendor/src/github.com/kr/pty/ioctl_bsd.go b/tests/Godeps/_workspace/src/github.com/kr/pty/ioctl_bsd.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/ioctl_bsd.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/ioctl_bsd.go diff --git a/tests/_vendor/src/github.com/kr/pty/ioctl_linux.go b/tests/Godeps/_workspace/src/github.com/kr/pty/ioctl_linux.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/ioctl_linux.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/ioctl_linux.go diff --git a/tests/Godeps/_workspace/src/github.com/kr/pty/mktypes.bash b/tests/Godeps/_workspace/src/github.com/kr/pty/mktypes.bash new file mode 100644 index 0000000000..9952c88838 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/kr/pty/mktypes.bash @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +GOOSARCH="${GOOS}_${GOARCH}" +case "$GOOSARCH" in +_* | *_ | _) + echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 + exit 1 + ;; +esac + +GODEFS="go tool cgo -godefs" + +$GODEFS types.go |gofmt > ztypes_$GOARCH.go + +case $GOOS in +freebsd) + $GODEFS types_$GOOS.go |gofmt > ztypes_$GOOSARCH.go + ;; +esac diff --git a/tests/_vendor/src/github.com/kr/pty/pty_darwin.go b/tests/Godeps/_workspace/src/github.com/kr/pty/pty_darwin.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/pty_darwin.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/pty_darwin.go diff --git a/tests/_vendor/src/github.com/kr/pty/pty_freebsd.go b/tests/Godeps/_workspace/src/github.com/kr/pty/pty_freebsd.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/pty_freebsd.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/pty_freebsd.go diff --git a/tests/_vendor/src/github.com/kr/pty/pty_linux.go b/tests/Godeps/_workspace/src/github.com/kr/pty/pty_linux.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/pty_linux.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/pty_linux.go diff --git 
a/tests/_vendor/src/github.com/kr/pty/pty_unsupported.go b/tests/Godeps/_workspace/src/github.com/kr/pty/pty_unsupported.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/pty_unsupported.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/pty_unsupported.go diff --git a/tests/_vendor/src/github.com/kr/pty/run.go b/tests/Godeps/_workspace/src/github.com/kr/pty/run.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/run.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/run.go diff --git a/tests/_vendor/src/github.com/kr/pty/types.go b/tests/Godeps/_workspace/src/github.com/kr/pty/types.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/types.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/types.go diff --git a/tests/_vendor/src/github.com/kr/pty/types_freebsd.go b/tests/Godeps/_workspace/src/github.com/kr/pty/types_freebsd.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/types_freebsd.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/types_freebsd.go diff --git a/tests/_vendor/src/github.com/kr/pty/util.go b/tests/Godeps/_workspace/src/github.com/kr/pty/util.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/util.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/util.go diff --git a/tests/_vendor/src/github.com/kr/pty/ztypes_386.go b/tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_386.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/ztypes_386.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_386.go diff --git a/tests/_vendor/src/github.com/kr/pty/ztypes_amd64.go b/tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_amd64.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/ztypes_amd64.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_amd64.go diff --git a/tests/_vendor/src/github.com/kr/pty/ztypes_arm.go b/tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_arm.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/ztypes_arm.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_arm.go diff --git a/tests/_vendor/src/github.com/kr/pty/ztypes_freebsd_386.go b/tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_386.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/ztypes_freebsd_386.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_386.go diff --git a/tests/_vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go b/tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_amd64.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_amd64.go diff --git a/tests/_vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go b/tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_arm.go similarity index 100% rename from tests/_vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go rename to tests/Godeps/_workspace/src/github.com/kr/pty/ztypes_freebsd_arm.go diff --git a/tests/_vendor/src/github.com/tchap/go-patricia/patricia/children.go b/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/children.go similarity index 100% rename from tests/_vendor/src/github.com/tchap/go-patricia/patricia/children.go rename to tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/children.go diff --git 
a/tests/_vendor/src/github.com/tchap/go-patricia/patricia/patricia.go b/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia.go similarity index 100% rename from tests/_vendor/src/github.com/tchap/go-patricia/patricia/patricia.go rename to tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia.go diff --git a/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go b/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go new file mode 100644 index 0000000000..346e9a66cb --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go @@ -0,0 +1,161 @@ +// Copyright (c) 2014 The go-patricia AUTHORS +// +// Use of this source code is governed by The MIT License +// that can be found in the LICENSE file. + +package patricia + +import ( + "testing" +) + +// Tests ----------------------------------------------------------------------- + +func TestTrie_InsertDense(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"aba", 0, success}, + {"abb", 1, success}, + {"abc", 2, success}, + {"abd", 3, success}, + {"abe", 4, success}, + {"abf", 5, success}, + {"abg", 6, success}, + {"abh", 7, success}, + {"abi", 8, success}, + {"abj", 9, success}, + {"abk", 0, success}, + {"abl", 1, success}, + {"abm", 2, success}, + {"abn", 3, success}, + {"abo", 4, success}, + {"abp", 5, success}, + {"abq", 6, success}, + {"abr", 7, success}, + {"abs", 8, success}, + {"abt", 9, success}, + {"abu", 0, success}, + {"abv", 1, success}, + {"abw", 2, success}, + {"abx", 3, success}, + {"aby", 4, success}, + {"abz", 5, success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } +} + +func TestTrie_InsertDensePreceeding(t *testing.T) { + trie := NewTrie() + start := byte(70) + // create a dense node + for i := byte(0); i <= MaxChildrenPerSparseNode; i++ { + if !trie.Insert(Prefix([]byte{start + i}), true) { + t.Errorf("insert failed, prefix=%v", start+i) + } + } + // insert some preceeding keys + for i := byte(1); i < start; i *= i + 1 { + if !trie.Insert(Prefix([]byte{start - i}), true) { + t.Errorf("insert failed, prefix=%v", start-i) + } + } +} + +func TestTrie_InsertDenseDuplicatePrefixes(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"aba", 0, success}, + {"abb", 1, success}, + {"abc", 2, success}, + {"abd", 3, success}, + {"abe", 4, success}, + {"abf", 5, success}, + {"abg", 6, success}, + {"abh", 7, success}, + {"abi", 8, success}, + {"abj", 9, success}, + {"abk", 0, success}, + {"abl", 1, success}, + {"abm", 2, success}, + {"abn", 3, success}, + {"abo", 4, success}, + {"abp", 5, success}, + {"abq", 6, success}, + {"abr", 7, success}, + {"abs", 8, success}, + {"abt", 9, success}, + {"abu", 0, success}, + {"abv", 1, success}, + {"abw", 2, success}, + {"abx", 3, success}, + {"aby", 4, success}, + {"abz", 5, success}, + {"aba", 0, failure}, + {"abb", 1, failure}, + {"abc", 2, failure}, + {"abd", 3, failure}, + {"abe", 4, failure}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } +} + +func TestTrie_DeleteDense(t *testing.T) { + trie := 
NewTrie() + + data := []testData{ + {"aba", 0, success}, + {"abb", 1, success}, + {"abc", 2, success}, + {"abd", 3, success}, + {"abe", 4, success}, + {"abf", 5, success}, + {"abg", 6, success}, + {"abh", 7, success}, + {"abi", 8, success}, + {"abj", 9, success}, + {"abk", 0, success}, + {"abl", 1, success}, + {"abm", 2, success}, + {"abn", 3, success}, + {"abo", 4, success}, + {"abp", 5, success}, + {"abq", 6, success}, + {"abr", 7, success}, + {"abs", 8, success}, + {"abt", 9, success}, + {"abu", 0, success}, + {"abv", 1, success}, + {"abw", 2, success}, + {"abx", 3, success}, + {"aby", 4, success}, + {"abz", 5, success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + for _, v := range data { + t.Logf("DELETE word=%v, success=%v", v.key, v.retVal) + if ok := trie.Delete([]byte(v.key)); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } +} diff --git a/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go b/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go new file mode 100644 index 0000000000..27f3c878b5 --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go @@ -0,0 +1,659 @@ +// Copyright (c) 2014 The go-patricia AUTHORS +// +// Use of this source code is governed by The MIT License +// that can be found in the LICENSE file. + +package patricia + +import ( + "bytes" + "errors" + "fmt" + "strings" + "testing" +) + +const ( + success = true + failure = false +) + +type testData struct { + key string + value interface{} + retVal bool +} + +// Tests ----------------------------------------------------------------------- + +func TestTrie_InsertDifferentPrefixes(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepaneeeeeeeeeeeeee", "Pepan Zdepan", success}, + {"Honzooooooooooooooo", "Honza Novak", success}, + {"Jenikuuuuuuuuuuuuuu", "Jenik Poustevnicek", success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } +} + +func TestTrie_InsertDuplicatePrefixes(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepan", "Pepan Zdepan", success}, + {"Pepan", "Pepan Zdepan", failure}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } +} + +func TestTrie_InsertVariousPrefixes(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepan", "Pepan Zdepan", success}, + {"Pepin", "Pepin Omacka", success}, + {"Honza", "Honza Novak", success}, + {"Jenik", "Jenik Poustevnicek", success}, + {"Pepan", "Pepan Dupan", failure}, + {"Karel", "Karel Pekar", success}, + {"Jenik", "Jenik Poustevnicek", failure}, + {"Pepanek", "Pepanek Zemlicka", success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) 
+ } + } +} + +func TestTrie_InsertAndMatchPrefix(t *testing.T) { + trie := NewTrie() + t.Log("INSERT prefix=by week") + trie.Insert(Prefix("by week"), 2) + t.Log("INSERT prefix=by") + trie.Insert(Prefix("by"), 1) + + if !trie.Match(Prefix("by")) { + t.Error("MATCH prefix=by, expected=true, got=false") + } +} + +func TestTrie_SetGet(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepan", "Pepan Zdepan", success}, + {"Pepin", "Pepin Omacka", success}, + {"Honza", "Honza Novak", success}, + {"Jenik", "Jenik Poustevnicek", success}, + {"Pepan", "Pepan Dupan", failure}, + {"Karel", "Karel Pekar", success}, + {"Jenik", "Jenik Poustevnicek", failure}, + {"Pepanek", "Pepanek Zemlicka", success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + for _, v := range data { + t.Logf("SET %q to 10", v.key) + trie.Set(Prefix(v.key), 10) + } + + for _, v := range data { + value := trie.Get(Prefix(v.key)) + t.Logf("GET %q => %v", v.key, value) + if value.(int) != 10 { + t.Errorf("Unexpected return value, %v != 10", value) + } + } + + if value := trie.Get(Prefix("random crap")); value != nil { + t.Errorf("Unexpected return value, %v != ", value) + } +} + +func TestTrie_Match(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepan", "Pepan Zdepan", success}, + {"Pepin", "Pepin Omacka", success}, + {"Honza", "Honza Novak", success}, + {"Jenik", "Jenik Poustevnicek", success}, + {"Pepan", "Pepan Dupan", failure}, + {"Karel", "Karel Pekar", success}, + {"Jenik", "Jenik Poustevnicek", failure}, + {"Pepanek", "Pepanek Zemlicka", success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + for _, v := range data { + matched := trie.Match(Prefix(v.key)) + t.Logf("MATCH %q => %v", v.key, matched) + if !matched { + t.Errorf("Inserted key %q was not matched", v.key) + } + } + + if trie.Match(Prefix("random crap")) { + t.Errorf("Key that was not inserted matched: %q", "random crap") + } +} + +func TestTrie_MatchFalsePositive(t *testing.T) { + trie := NewTrie() + + if ok := trie.Insert(Prefix("A"), 1); !ok { + t.Fatal("INSERT prefix=A, item=1 not ok") + } + + resultMatchSubtree := trie.MatchSubtree(Prefix("A extra")) + resultMatch := trie.Match(Prefix("A extra")) + + if resultMatchSubtree != false { + t.Error("MatchSubtree returned false positive") + } + + if resultMatch != false { + t.Error("Match returned false positive") + } +} + +func TestTrie_MatchSubtree(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepan", "Pepan Zdepan", success}, + {"Pepin", "Pepin Omacka", success}, + {"Honza", "Honza Novak", success}, + {"Jenik", "Jenik Poustevnicek", success}, + {"Pepan", "Pepan Dupan", failure}, + {"Karel", "Karel Pekar", success}, + {"Jenik", "Jenik Poustevnicek", failure}, + {"Pepanek", "Pepanek Zemlicka", success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + for _, v := range data { + key := Prefix(v.key[:3]) + matched := trie.MatchSubtree(key) +
t.Logf("MATCH_SUBTREE %q => %v", key, matched) + if !matched { + t.Errorf("Subtree %q was not matched", v.key) + } + } +} + +func TestTrie_Visit(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepa", 0, success}, + {"Pepa Zdepa", 1, success}, + {"Pepa Kuchar", 2, success}, + {"Honza", 3, success}, + {"Jenik", 4, success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal { + t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + if err := trie.Visit(func(prefix Prefix, item Item) error { + name := data[item.(int)].key + t.Logf("VISITING prefix=%q, item=%v", prefix, item) + if !strings.HasPrefix(string(prefix), name) { + t.Errorf("Unexpected prefix encountered, %q not a prefix of %q", prefix, name) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestTrie_VisitSkipSubtree(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepa", 0, success}, + {"Pepa Zdepa", 1, success}, + {"Pepa Kuchar", 2, success}, + {"Honza", 3, success}, + {"Jenik", 4, success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal { + t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + if err := trie.Visit(func(prefix Prefix, item Item) error { + t.Logf("VISITING prefix=%q, item=%v", prefix, item) + if item.(int) == 0 { + t.Logf("SKIP %q", prefix) + return SkipSubtree + } + if strings.HasPrefix(string(prefix), "Pepa") { + t.Errorf("Unexpected prefix encountered, %q", prefix) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestTrie_VisitReturnError(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepa", 0, success}, + {"Pepa Zdepa", 1, success}, + {"Pepa Kuchar", 2, success}, + {"Honza", 3, success}, + {"Jenik", 4, success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal { + t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + someErr := errors.New("Something exploded") + if err := trie.Visit(func(prefix Prefix, item Item) error { + t.Logf("VISITING prefix=%q, item=%v", prefix, item) + if item.(int) == 0 { + return someErr + } + if item.(int) != 0 { + t.Errorf("Unexpected prefix encountered, %q", prefix) + } + return nil + }); err != nil && err != someErr { + t.Fatal(err) + } +} + +func TestTrie_VisitSubtree(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepa", 0, success}, + {"Pepa Zdepa", 1, success}, + {"Pepa Kuchar", 2, success}, + {"Honza", 3, success}, + {"Jenik", 4, success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal { + t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + var counter int + subtreePrefix := []byte("Pep") + t.Log("VISIT Pep") + if err := trie.VisitSubtree(subtreePrefix, func(prefix Prefix, item Item) error { + t.Logf("VISITING prefix=%q, item=%v", prefix, item) + if !bytes.HasPrefix(prefix, subtreePrefix) { + t.Errorf("Unexpected prefix encountered, %q does not extend %q", + prefix, subtreePrefix) + } + if len(prefix) > len(data[item.(int)].key) { + t.Fatalf("Something is rather fishy here, 
prefix=%q", prefix) + } + counter++ + return nil + }); err != nil { + t.Fatal(err) + } + + if counter != 3 { + t.Error("Unexpected number of nodes visited") + } +} + +func TestTrie_VisitPrefixes(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"P", 0, success}, + {"Pe", 1, success}, + {"Pep", 2, success}, + {"Pepa", 3, success}, + {"Pepa Zdepa", 4, success}, + {"Pepa Kuchar", 5, success}, + {"Honza", 6, success}, + {"Jenik", 7, success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal { + t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + var counter int + word := []byte("Pepa") + if err := trie.VisitPrefixes(word, func(prefix Prefix, item Item) error { + t.Logf("VISITING prefix=%q, item=%v", prefix, item) + if !bytes.HasPrefix(word, prefix) { + t.Errorf("Unexpected prefix encountered, %q is not a prefix of %q", + prefix, word) + } + counter++ + return nil + }); err != nil { + t.Fatal(err) + } + + if counter != 4 { + t.Error("Unexpected number of nodes visited") + } +} + +func TestParticiaTrie_Delete(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Pepan", "Pepan Zdepan", success}, + {"Honza", "Honza Novak", success}, + {"Jenik", "Jenik Poustevnicek", success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal { + t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + for _, v := range data { + t.Logf("DELETE word=%v, success=%v", v.key, v.retVal) + if ok := trie.Delete([]byte(v.key)); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } +} + +func TestParticiaTrie_DeleteNonExistent(t *testing.T) { + trie := NewTrie() + + insertData := []testData{ + {"Pepan", "Pepan Zdepan", success}, + {"Honza", "Honza Novak", success}, + {"Jenik", "Jenik Poustevnicek", success}, + } + deleteData := []testData{ + {"Pepan", "Pepan Zdepan", success}, + {"Honza", "Honza Novak", success}, + {"Pepan", "Pepan Zdepan", failure}, + {"Jenik", "Jenik Poustevnicek", success}, + {"Honza", "Honza Novak", failure}, + } + + for _, v := range insertData { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal { + t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + for _, v := range deleteData { + t.Logf("DELETE word=%v, success=%v", v.key, v.retVal) + if ok := trie.Delete([]byte(v.key)); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } +} + +func TestParticiaTrie_DeleteSubtree(t *testing.T) { + trie := NewTrie() + + insertData := []testData{ + {"P", 0, success}, + {"Pe", 1, success}, + {"Pep", 2, success}, + {"Pepa", 3, success}, + {"Pepa Zdepa", 4, success}, + {"Pepa Kuchar", 5, success}, + {"Honza", 6, success}, + {"Jenik", 7, success}, + } + deleteData := []testData{ + {"Pe", -1, success}, + {"Pe", -1, failure}, + {"Honzik", -1, failure}, + {"Honza", -1, success}, + {"Honza", -1, failure}, + {"Pep", -1, failure}, + {"P", -1, success}, + {"Nobody", -1, failure}, + {"", -1, success}, + } + + for _, v := range insertData { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal { + t.Fatalf("Unexpected 
return value, expected=%v, got=%v", v.retVal, ok) + } + } + + for _, v := range deleteData { + t.Logf("DELETE_SUBTREE prefix=%v, success=%v", v.key, v.retVal) + if ok := trie.DeleteSubtree([]byte(v.key)); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } +} + +/* +func TestTrie_Dump(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"Honda", nil, success}, + {"Honza", nil, success}, + {"Jenik", nil, success}, + {"Pepan", nil, success}, + {"Pepin", nil, success}, + } + + for i, v := range data { + if _, ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal { + t.Logf("INSERT %v %v", v.key, v.value) + t.Fatalf("Unexpected return value, expected=%v, got=%v", i, ok) + } + } + + dump := ` ++--+--+ Hon +--+--+ da + | | + | +--+ za + | + +--+ Jenik + | + +--+ Pep +--+--+ an + | + +--+ in +` + + var buf bytes.Buffer + trie.Dump(buf) + + if !bytes.Equal(buf.Bytes(), dump) { + t.Logf("DUMP") + t.Fatalf("Unexpected dump generated, expected\n\n%v\ngot\n\n%v", dump, buf.String()) + } +} +*/ + +func TestTrie_compact(t *testing.T) { + trie := NewTrie() + + trie.Insert(Prefix("a"), 0) + trie.Insert(Prefix("ab"), 0) + trie.Insert(Prefix("abc"), 0) + trie.Insert(Prefix("abcd"), 0) + trie.Insert(Prefix("abcde"), 0) + trie.Insert(Prefix("abcdef"), 0) + trie.Insert(Prefix("abcdefg"), 0) + trie.Insert(Prefix("abcdefgi"), 0) + trie.Insert(Prefix("abcdefgij"), 0) + trie.Insert(Prefix("abcdefgijk"), 0) + + trie.Delete(Prefix("abcdef")) + trie.Delete(Prefix("abcde")) + trie.Delete(Prefix("abcdefg")) + + trie.Delete(Prefix("a")) + trie.Delete(Prefix("abc")) + trie.Delete(Prefix("ab")) + + trie.Visit(func(prefix Prefix, item Item) error { + // 97 ~~ 'a', + for ch := byte(97); ch <= 107; ch++ { + if c := bytes.Count(prefix, []byte{ch}); c > 1 { + t.Errorf("%q appeared in %q %v times", ch, prefix, c) + } + } + return nil + }) +} + +func TestTrie_longestCommonPrefixLenght(t *testing.T) { + trie := NewTrie() + trie.prefix = []byte("1234567890") + + switch { + case trie.longestCommonPrefixLength([]byte("")) != 0: + t.Fail() + case trie.longestCommonPrefixLength([]byte("12345")) != 5: + t.Fail() + case trie.longestCommonPrefixLength([]byte("123789")) != 3: + t.Fail() + case trie.longestCommonPrefixLength([]byte("12345678901")) != 10: + t.Fail() + } +} + +// Examples -------------------------------------------------------------------- + +func ExampleTrie() { + // Create a new tree. + trie := NewTrie() + + // Insert some items. + trie.Insert(Prefix("Pepa Novak"), 1) + trie.Insert(Prefix("Pepa Sindelar"), 2) + trie.Insert(Prefix("Karel Macha"), 3) + trie.Insert(Prefix("Karel Hynek Macha"), 4) + + // Just check if some things are present in the tree. + key := Prefix("Pepa Novak") + fmt.Printf("%q present? %v\n", key, trie.Match(key)) + key = Prefix("Karel") + fmt.Printf("Anybody called %q here? %v\n", key, trie.MatchSubtree(key)) + + // Walk the tree. + trie.Visit(printItem) + // "Pepa Novak": 1 + // "Pepa Sindelar": 2 + // "Karel Macha": 3 + // "Karel Hynek Macha": 4 + + // Walk a subtree. + trie.VisitSubtree(Prefix("Pepa"), printItem) + // "Pepa Novak": 1 + // "Pepa Sindelar": 2 + + // Modify an item, then fetch it from the tree. + trie.Set(Prefix("Karel Hynek Macha"), 10) + key = Prefix("Karel Hynek Macha") + fmt.Printf("%q: %v\n", key, trie.Get(key)) + // "Karel Hynek Macha": 10 + + // Walk prefixes. + prefix := Prefix("Karel Hynek Macha je kouzelnik") + trie.VisitPrefixes(prefix, printItem) + // "Karel Hynek Macha": 10 + + // Delete some items. 
+ trie.Delete(Prefix("Pepa Novak")) + trie.Delete(Prefix("Karel Macha")) + + // Walk again. + trie.Visit(printItem) + // "Pepa Sindelar": 2 + // "Karel Hynek Macha": 10 + + // Delete a subtree. + trie.DeleteSubtree(Prefix("Pepa")) + + // Print what is left. + trie.Visit(printItem) + // "Karel Hynek Macha": 10 + + // Output: + // "Pepa Novak" present? true + // Anybody called "Karel" here? true + // "Pepa Novak": 1 + // "Pepa Sindelar": 2 + // "Karel Macha": 3 + // "Karel Hynek Macha": 4 + // "Pepa Novak": 1 + // "Pepa Sindelar": 2 + // "Karel Hynek Macha": 10 + // "Karel Hynek Macha": 10 + // "Pepa Sindelar": 2 + // "Karel Hynek Macha": 10 + // "Karel Hynek Macha": 10 +} + +// Helpers --------------------------------------------------------------------- + +func printItem(prefix Prefix, item Item) error { + fmt.Printf("%q: %v\n", prefix, item) + return nil +} diff --git a/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia_test.go b/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia_test.go new file mode 100644 index 0000000000..ce5ae378fa --- /dev/null +++ b/tests/Godeps/_workspace/src/github.com/tchap/go-patricia/patricia/patricia_test.go @@ -0,0 +1,78 @@ +// Copyright (c) 2014 The go-patricia AUTHORS +// +// Use of this source code is governed by The MIT License +// that can be found in the LICENSE file. + +package patricia + +import ( + "crypto/rand" + "reflect" + "testing" +) + +// Tests ----------------------------------------------------------------------- + +func TestTrie_GetNonexistentPrefix(t *testing.T) { + trie := NewTrie() + + data := []testData{ + {"aba", 0, success}, + } + + for _, v := range data { + t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal) + if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal { + t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok) + } + } + + t.Logf("GET prefix=baa, expect item=nil") + if item := trie.Get(Prefix("baa")); item != nil { + t.Errorf("Unexpected return value, expected=, got=%v", item) + } +} + +func TestTrie_RandomKitchenSink(t *testing.T) { + if testing.Short() { + t.Skip() + } + const count, size = 750000, 16 + b := make([]byte, count+size+1) + if _, err := rand.Read(b); err != nil { + t.Fatal("error generating random bytes", err) + } + m := make(map[string]string) + for i := 0; i < count; i++ { + m[string(b[i:i+size])] = string(b[i+1 : i+size+1]) + } + trie := NewTrie() + getAndDelete := func(k, v string) { + i := trie.Get(Prefix(k)) + if i == nil { + t.Fatalf("item not found, prefix=%v", []byte(k)) + } else if s, ok := i.(string); !ok { + t.Fatalf("unexpected item type, expecting=%v, got=%v", reflect.TypeOf(k), reflect.TypeOf(i)) + } else if s != v { + t.Fatalf("unexpected item, expecting=%v, got=%v", []byte(k), []byte(s)) + } else if !trie.Delete(Prefix(k)) { + t.Fatalf("delete failed, prefix=%v", []byte(k)) + } else if i = trie.Get(Prefix(k)); i != nil { + t.Fatalf("unexpected item, expecting=, got=%v", i) + } else if trie.Delete(Prefix(k)) { + t.Fatalf("extra delete succeeded, prefix=%v", []byte(k)) + } + } + for k, v := range m { + if !trie.Insert(Prefix(k), v) { + t.Fatalf("insert failed, prefix=%v", []byte(k)) + } + if byte(k[size/2]) < 128 { + getAndDelete(k, v) + delete(m, k) + } + } + for k, v := range m { + getAndDelete(k, v) + } +} diff --git a/tests/Makefile b/tests/Makefile index a795bf3333..6aa0415075 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,28 +1,18 @@ test: test-smoke test-smoke: test-style - 
GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -short -timeout 20m -v -run TestSmoke + godep go test -tags integration -short -timeout 20m -v -run TestSmoke test-full: test-style - GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -v -run TestGlobal - GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -v -run TestApps - GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -v -run TestAuth - GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -v -run TestBuilds - GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -v -run TestConfig - GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -v -run TestKeys - GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -v -run TestPerms - GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -v -run TestPs - GOPATH=$(CURDIR)/_vendor:$(GOPATH) \ - go test -tags integration -v -run TestReleases + godep go test -tags integration -v -run TestGlobal + godep go test -tags integration -v -run TestApps + godep go test -tags integration -v -run TestAuth + godep go test -tags integration -v -run TestBuilds + godep go test -tags integration -v -run TestConfig + godep go test -tags integration -v -run TestKeys + godep go test -tags integration -v -run TestPerms + godep go test -tags integration -v -run TestPs + godep go test -tags integration -v -run TestReleases setup-root-gotools: sudo GOPATH=/tmp/tmpGOPATH go get -u -v code.google.com/p/go.tools/cmd/cover diff --git a/tests/_vendor/src/github.com/coreos/go-etcd/LICENSE b/tests/_vendor/src/github.com/coreos/go-etcd/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/tests/_vendor/src/github.com/coreos/go-etcd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/tests/_vendor/src/github.com/docker/docker/VERSION b/tests/_vendor/src/github.com/docker/docker/VERSION deleted file mode 100644 index f0bb29e763..0000000000 --- a/tests/_vendor/src/github.com/docker/docker/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.3.0 diff --git a/tests/_vendor/src/github.com/docker/libcontainer/LICENSE b/tests/_vendor/src/github.com/docker/libcontainer/LICENSE deleted file mode 100644 index 27448585ad..0000000000 --- a/tests/_vendor/src/github.com/docker/libcontainer/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/tests/_vendor/src/github.com/docker/libtrust/LICENSE b/tests/_vendor/src/github.com/docker/libtrust/LICENSE deleted file mode 100644 index 27448585ad..0000000000 --- a/tests/_vendor/src/github.com/docker/libtrust/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License.
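The Makefile hunks in this diff all point at the same workflow: godep, rather than a checked-in `_vendor` tree, now supplies the test GOPATH. A minimal sketch of that workflow, assuming `godep` is installed and on the PATH; the subcommands are godep's own, but the exact invocations are illustrative rather than part of this change:

```sh
# One-time setup: install godep, which provides the `godep path` and
# `godep go` commands the Makefiles above shell out to.
go get github.com/tools/godep

# Run the integration smoke tests through Godeps/_workspace, mirroring
# the updated test-smoke target in tests/Makefile:
cd tests
godep go test -tags integration -short -timeout 20m -v -run TestSmoke

# After bumping a dependency, regenerate Godeps/Godeps.json and
# Godeps/_workspace so the manifest stays in sync (illustrative flags):
godep save ./...
```

`godep save` rewrites both the JSON manifest and the `_workspace` source tree, which is why the rename-only hunks above move every vendored file rather than editing any of them.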