Mirror of https://github.com/moby/moby.git
Merge branch 'job-create-start-clean' into engine-patch-2
Conflicts:
	engine/engine.go
	engine/job.go
	server.go
	utils_test.go
Commit 6c4393ccbb
14 changed files with 642 additions and 312 deletions
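This merge moves container creation and start from direct Server method calls to engine jobs: the HTTP handlers below now build a job, feed the JSON request body into the job environment, and read the container ID and warnings back from the job's stdout and stderr. The following Go sketch is illustrative only and is not part of the commit; the package and function names are made up, and it uses only the job APIs that appear in the hunks below.

package apisketch // hypothetical package, for illustration only

import (
    "io"

    "github.com/dotcloud/docker/engine"
)

// createContainer mirrors the new postContainersCreate flow: build a
// "create" job, decode the JSON request body into the job environment,
// and collect the container ID and warnings from the job's output.
func createContainer(eng *engine.Engine, body io.Reader, name string) (id string, warnings []string, err error) {
    job := eng.Job("create", name)
    if err := job.DecodeEnv(body); err != nil {
        return "", nil, err
    }
    // The container ID is the first line printed on stdout.
    job.StdoutParseString(&id)
    // Warnings are emitted on stderr, one per line.
    job.StderrParseLines(&warnings, 0)
    if err := job.Run(); err != nil {
        return "", nil, err
    }
    return id, warnings, nil
}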
api.go (58 changed lines)

@@ -541,43 +541,36 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r
if err := parseForm(r); err != nil {
return nil
}
config := &Config{}
out := &APIRun{}
name := r.Form.Get("name")

if err := json.NewDecoder(r.Body).Decode(config); err != nil {
job := srv.Eng.Job("create", r.Form.Get("name"))
if err := job.DecodeEnv(r.Body); err != nil {
return err
}

resolvConf, err := utils.GetResolvConf()
if err != nil {
return err
}

if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
if !job.GetenvBool("NetworkDisabled") && len(job.Getenv("Dns")) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
out.Warnings = append(out.Warnings, fmt.Sprintf("Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns))
config.Dns = defaultDns
job.SetenvList("Dns", defaultDns)
}

id, warnings, err := srv.ContainerCreate(config, name)
if err != nil {
// Read container ID from the first line of stdout
job.StdoutParseString(&out.ID)
// Read warnings from stderr
job.StderrParseLines(&out.Warnings, 0)
if err := job.Run(); err != nil {
return err
}
out.ID = id
for _, warning := range warnings {
out.Warnings = append(out.Warnings, warning)
}

if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit {
log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
}
if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.SwapLimit {
log.Println("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.")
}

if !config.NetworkDisabled && srv.runtime.capabilities.IPv4ForwardingDisabled {
if !job.GetenvBool("NetworkDisabled") && srv.runtime.capabilities.IPv4ForwardingDisabled {
log.Println("Warning: IPv4 forwarding is disabled.")
out.Warnings = append(out.Warnings, "IPv4 forwarding is disabled.")
}

@@ -654,26 +647,23 @@ func deleteImages(srv *Server, version float64, w http.ResponseWriter, r *http.R
}

func postContainersStart(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var hostConfig *HostConfig
// allow a nil body for backwards compatibility
if r.Body != nil {
if matchesContentType(r.Header.Get("Content-Type"), "application/json") {
hostConfig = &HostConfig{}
if err := json.NewDecoder(r.Body).Decode(hostConfig); err != nil {
return err
}
}
}

if vars == nil {
return fmt.Errorf("Missing parameter")
}
name := vars["name"]
// Register any links from the host config before starting the container
if err := srv.RegisterLinks(name, hostConfig); err != nil {
return err
job := srv.Eng.Job("start", name)
if err := job.ImportEnv(HostConfig{}); err != nil {
return fmt.Errorf("Couldn't initialize host configuration")
}
if err := srv.ContainerStart(name, hostConfig); err != nil {
// allow a nil body for backwards compatibility
if r.Body != nil {
if matchesContentType(r.Header.Get("Content-Type"), "application/json") {
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
}
}
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
api_test.go (34 changed lines)

@@ -609,11 +609,11 @@ func TestPostCommit(t *testing.T) {
}

func TestPostContainersCreate(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)

srv := &Server{runtime: runtime}

configJSON, err := json.Marshal(&Config{
Image: GetTestImage(runtime).ID,
Memory: 33554432,

@@ -756,27 +756,23 @@ func TestPostContainersRestart(t *testing.T) {
}

func TestPostContainersStart(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)

srv := &Server{runtime: runtime}

container, _, err := runtime.Create(
id := createTestContainer(
eng,
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/cat"},
OpenStdin: true,
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
t)

hostConfigJSON, err := json.Marshal(&HostConfig{})

req, err := http.NewRequest("POST", "/containers/"+container.ID+"/start", bytes.NewReader(hostConfigJSON))
req, err := http.NewRequest("POST", "/containers/"+id+"/start", bytes.NewReader(hostConfigJSON))
if err != nil {
t.Fatal(err)
}

@@ -784,22 +780,26 @@ func TestPostContainersStart(t *testing.T) {
req.Header.Set("Content-Type", "application/json")

r := httptest.NewRecorder()
if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil {
if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusNoContent {
t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
}

container := runtime.Get(id)
if container == nil {
t.Fatalf("Container %s was not created", id)
}
// Give some time to the process to start
// FIXME: use Wait once it's available as a job
container.WaitTimeout(500 * time.Millisecond)

if !container.State.Running {
t.Errorf("Container should be running")
}

r = httptest.NewRecorder()
if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err == nil {
if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err == nil {
t.Fatalf("A running container should be able to be started")
}
buildfile_test.go

@@ -544,10 +544,7 @@ func TestBuildADDFileNotFound(t *testing.T) {
}

func TestBuildInheritance(t *testing.T) {
runtime, err := newTestRuntime("")
if err != nil {
t.Fatal(err)
}
runtime := mkRuntime(t)
defer nuke(runtime)

srv := &Server{
config.go

@@ -9,7 +9,6 @@ import (
type DaemonConfig struct {
Pidfile string
Root string
ProtoAddresses []string
AutoRestart bool
EnableCors bool
Dns []string

@@ -36,7 +35,6 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
} else {
config.BridgeIface = DefaultNetworkBridge
}
config.ProtoAddresses = job.GetenvList("ProtoAddresses")
config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
return &config
docker/docker.go

@@ -71,7 +71,8 @@ func main() {
if err != nil {
log.Fatal(err)
}
job := eng.Job("serveapi")
// Load plugin: httpapi
job := eng.Job("initapi")
job.Setenv("Pidfile", *pidfile)
job.Setenv("Root", *flRoot)
job.SetenvBool("AutoRestart", *flAutoRestart)

@@ -79,12 +80,17 @@ func main() {
job.Setenv("Dns", *flDns)
job.SetenvBool("EnableIptables", *flEnableIptables)
job.Setenv("BridgeIface", *bridgeName)
job.SetenvList("ProtoAddresses", flHosts)
job.Setenv("DefaultIp", *flDefaultIp)
job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
if err := job.Run(); err != nil {
log.Fatal(err)
}
// Serve api
job = eng.Job("serveapi", flHosts...)
job.SetenvBool("Logging", true)
if err := job.Run(); err != nil {
log.Fatal(err)
}
} else {
if len(flHosts) > 1 {
log.Fatal("Please specify only one -H")
engine/engine.go

@@ -6,15 +6,21 @@ import (
"log"
"os"
"runtime"
"strings"
)

type Handler func(*Job) string

var globalHandlers map[string]Handler

func init() {
globalHandlers = make(map[string]Handler)
}

func Register(name string, handler Handler) error {
if globalHandlers == nil {
globalHandlers = make(map[string]Handler)
_, exists := globalHandlers[name]
if exists {
return fmt.Errorf("Can't overwrite global handler for command %s", name)
}
globalHandlers[name] = handler
return nil

@@ -24,10 +30,27 @@ func Register(name string, handler Handler) error {
// It acts as a store for *containers*, and allows manipulation of these
// containers by executing *jobs*.
type Engine struct {
root string
handlers map[string]Handler
root string
handlers map[string]Handler
hack Hack // data for temporary hackery (see hack.go)
id string
}

func (eng *Engine) Root() string {
return eng.root
}

func (eng *Engine) Register(name string, handler Handler) error {
eng.Logf("Register(%s) (handlers=%v)", name, eng.handlers)
_, exists := eng.handlers[name]
if exists {
return fmt.Errorf("Can't overwrite handler for command %s", name)
}
eng.handlers[name] = handler
return nil
}

// New initializes a new engine managing the directory specified at `root`.
// `root` is used to store containers and any other state private to the engine.
// Changing the contents of the root without executing a job will cause unspecified

@@ -55,22 +78,31 @@ func New(root string) (*Engine, error) {
return nil, err
}
eng := &Engine{
root: root,
handlers: globalHandlers,
root: root,
handlers: make(map[string]Handler),
id: utils.RandomString(),
}
// Copy existing global handlers
for k, v := range globalHandlers {
eng.handlers[k] = v
}
return eng, nil
}

func (eng *Engine) String() string {
return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8])
}

// Job creates a new job which can later be executed.
// This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job {
job := &Job{
eng: eng,
Name: name,
Args: args,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Eng: eng,
Name: name,
Args: args,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
handler, exists := eng.handlers[name]
if exists {

@@ -78,3 +110,8 @@ func (eng *Engine) Job(name string, args ...string) *Job {
}
return job
}

func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) {
prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n"))
return fmt.Fprintf(os.Stderr, prefixedFormat, args...)
}
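For orientation, here is a minimal sketch (not part of the commit) of the registration flow the Engine changes above enable: handlers registered globally before engine.New are copied into each new Engine, and eng.Job binds the matching handler when the job is created. The handler name, message key and root directory below are hypothetical, and New only succeeds in an environment that passes its platform checks.

package main

import (
    "log"

    "github.com/dotcloud/docker/engine"
)

func main() {
    // Global registration; copied into every Engine created afterwards.
    if err := engine.Register("echo", func(job *engine.Job) string {
        job.Printf("%s\n", job.Getenv("Message"))
        return "0" // "0" is the success status string in this job model
    }); err != nil {
        log.Fatal(err)
    }

    eng, err := engine.New("/var/lib/docker-sketch") // hypothetical root
    if err != nil {
        log.Fatal(err)
    }

    job := eng.Job("echo")
    job.Setenv("Message", "hello")
    if err := job.Run(); err != nil {
        log.Fatal(err)
    }
}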
engine/hack.go (23 changed lines, new file)

@@ -0,0 +1,23 @@
package engine

type Hack map[string]interface{}

func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
if eng.hack == nil {
return nil
}
val, exists := eng.hack[key]
if !exists {
return nil
}
return val
}

func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) {
if eng.hack == nil {
eng.hack = make(Hack)
}
eng.hack[key] = val
}
engine/job.go (250 changed lines)

@@ -1,11 +1,16 @@
package engine

import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/utils"
"bufio"
"bytes"
"io"
"io/ioutil"
"strconv"
"strings"
"fmt"
"sync"
"encoding/json"
"os"
)

// A job is the fundamental unit of work in the docker engine.

@@ -22,24 +27,43 @@ import (
// This allows for richer error reporting.
//
type Job struct {
eng *Engine
Name string
Args []string
env []string
Stdin io.ReadCloser
Stdout io.WriteCloser
Stderr io.WriteCloser
handler func(*Job) string
status string
Eng *Engine
Name string
Args []string
env []string
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
handler func(*Job) string
status string
onExit []func()
}

// Run executes the job and blocks until the job completes.
// If the job returns a failure status, an error is returned
// which includes the status.
func (job *Job) Run() error {
randId := utils.RandomString()[:4]
fmt.Printf("Job #%s: %s\n", randId, job)
defer fmt.Printf("Job #%s: %s = '%s'", randId, job, job.status)
defer func() {
var wg sync.WaitGroup
for _, f := range job.onExit {
wg.Add(1)
go func(f func()) {
f()
wg.Done()
}(f)
}
wg.Wait()
}()
if job.Stdout != nil && job.Stdout != os.Stdout {
job.Stdout = io.MultiWriter(job.Stdout, os.Stdout)
}
if job.Stderr != nil && job.Stderr != os.Stderr {
job.Stderr = io.MultiWriter(job.Stderr, os.Stderr)
}
job.Eng.Logf("+job %s", job.CallString())
defer func() {
job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
}()
if job.handler == nil {
job.status = "command not found"
} else {

@@ -51,9 +75,84 @@ func (job *Job) Run() error {
return nil
}

func (job *Job) StdoutParseLines(dst *[]string, limit int) {
job.parseLines(job.StdoutPipe(), dst, limit)
}

func (job *Job) StderrParseLines(dst *[]string, limit int) {
job.parseLines(job.StderrPipe(), dst, limit)
}

func (job *Job) parseLines(src io.Reader, dst *[]string, limit int) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
scanner := bufio.NewScanner(src)
for scanner.Scan() {
// If the limit is reached, flush the rest of the source and return
if limit > 0 && len(*dst) >= limit {
io.Copy(ioutil.Discard, src)
return
}
line := scanner.Text()
// Append the line (with delimitor removed)
*dst = append(*dst, line)
}
}()
job.onExit = append(job.onExit, wg.Wait)
}

func (job *Job) StdoutParseString(dst *string) {
lines := make([]string, 0, 1)
job.StdoutParseLines(&lines, 1)
job.onExit = append(job.onExit, func() { if len(lines) >= 1 { *dst = lines[0] }})
}

func (job *Job) StderrParseString(dst *string) {
lines := make([]string, 0, 1)
job.StderrParseLines(&lines, 1)
job.onExit = append(job.onExit, func() { *dst = lines[0]; })
}

func (job *Job) StdoutPipe() io.ReadCloser {
r, w := io.Pipe()
job.Stdout = w
job.onExit = append(job.onExit, func(){ w.Close() })
return r
}

func (job *Job) StderrPipe() io.ReadCloser {
r, w := io.Pipe()
job.Stderr = w
job.onExit = append(job.onExit, func(){ w.Close() })
return r
}

func (job *Job) CallString() string {
return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
}

func (job *Job) StatusString() string {
// FIXME: if a job returns the empty string, it will be printed
// as not having returned.
// (this only affects String which is a convenience function).
if job.status != "" {
var okerr string
if job.status == "0" {
okerr = "OK"
} else {
okerr = "ERR"
}
return fmt.Sprintf(" = %s (%s)", okerr, job.status)
}
return ""
}

// String returns a human-readable description of `job`
func (job *Job) String() string {
return strings.Join(append([]string{job.Name}, job.Args...), " ")
return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
}

func (job *Job) Getenv(key string) (value string) {

@@ -90,6 +189,19 @@ func (job *Job) SetenvBool(key string, value bool) {
}
}

func (job *Job) GetenvInt(key string) int64 {
s := strings.Trim(job.Getenv(key), " \t")
val, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return -1
}
return val
}

func (job *Job) SetenvInt(key string, value int64) {
job.Setenv(key, fmt.Sprintf("%d", value))
}

func (job *Job) GetenvList(key string) []string {
sval := job.Getenv(key)
l := make([]string, 0, 1)

@@ -111,3 +223,109 @@ func (job *Job) SetenvList(key string, value []string) error {
func (job *Job) Setenv(key, value string) {
job.env = append(job.env, key+"="+value)
}

// DecodeEnv decodes `src` as a json dictionary, and adds
// each decoded key-value pair to the environment.
//
// If `text` cannot be decoded as a json dictionary, an error
// is returned.
func (job *Job) DecodeEnv(src io.Reader) error {
m := make(map[string]interface{})
if err := json.NewDecoder(src).Decode(&m); err != nil {
return err
}
for k, v := range m {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, ok := v.(float64); ok {
job.SetenvInt(k, int64(fval))
} else if sval, ok := v.(string); ok {
job.Setenv(k, sval)
} else if val, err := json.Marshal(v); err == nil {
job.Setenv(k, string(val))
} else {
job.Setenv(k, fmt.Sprintf("%v", v))
}
}
return nil
}

func (job *Job) EncodeEnv(dst io.Writer) error {
m := make(map[string]interface{})
for k, v := range job.Environ() {
var val interface{}
if err := json.Unmarshal([]byte(v), &val); err == nil {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, isFloat := val.(float64); isFloat {
val = int(fval)
}
m[k] = val
} else {
m[k] = v
}
}
if err := json.NewEncoder(dst).Encode(&m); err != nil {
return err
}
return nil
}

func (job *Job) ExportEnv(dst interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ExportEnv %s", err)
}
}()
var buf bytes.Buffer
// step 1: encode/marshal the env to an intermediary json representation
if err := job.EncodeEnv(&buf); err != nil {
return err
}
// step 2: decode/unmarshal the intermediary json into the destination object
if err := json.NewDecoder(&buf).Decode(dst); err != nil {
return err
}
return nil
}

func (job *Job) ImportEnv(src interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ImportEnv: %s", err)
}
}()
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(src); err != nil {
return err
}
if err := job.DecodeEnv(&buf); err != nil {
return err
}
return nil
}

func (job *Job) Environ() map[string]string {
m := make(map[string]string)
for _, kv := range job.env {
parts := strings.SplitN(kv, "=", 2)
m[parts[0]] = parts[1]
}
return m
}

func (job *Job) Logf(format string, args ...interface{}) (n int, err error) {
prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n"))
return fmt.Fprintf(job.Stderr, prefixedFormat, args...)
}

func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
return fmt.Fprintf(job.Stdout, format, args...)
}

func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
return fmt.Fprintf(job.Stderr, format, args...)
}
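The job environment added above is a JSON-backed key/value store: ImportEnv encodes a Go struct and feeds it through DecodeEnv, the Getenv* helpers read individual keys, and ExportEnv rebuilds a struct on the other side of the job boundary, which is how ContainerCreate and ContainerStart recover Config and HostConfig in server.go further down. A minimal sketch (not part of the commit); the options struct, job name and root path are hypothetical.

package main

import (
    "fmt"
    "log"

    "github.com/dotcloud/docker/engine"
)

// sketchOpts is a stand-in for Config/HostConfig; any struct with
// JSON-encodable fields can be pushed through a job environment this way.
type sketchOpts struct {
    Memory     int64
    Privileged bool
    Dns        []string
}

func main() {
    eng, err := engine.New("/var/lib/docker-sketch") // hypothetical root
    if err != nil {
        log.Fatal(err)
    }
    // No handler is needed here because the job is never Run; we only
    // exercise its environment.
    job := eng.Job("noop")

    // ImportEnv marshals the struct to JSON and feeds it to DecodeEnv,
    // so each field becomes a key in the job environment.
    in := sketchOpts{Memory: 33554432, Privileged: true, Dns: []string{"8.8.8.8"}}
    if err := job.ImportEnv(in); err != nil {
        log.Fatal(err)
    }

    // Individual typed getters...
    fmt.Println("Memory =", job.GetenvInt("Memory"))

    // ...or ExportEnv to rebuild a struct on the receiving side.
    var out sketchOpts
    if err := job.ExportEnv(&out); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", out)
}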
@@ -15,7 +15,7 @@ func init() {
Register("dummy", func(job *Job) string { return "" })
}

func mkEngine(t *testing.T) *Engine {
func newTestEngine(t *testing.T) *Engine {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(1)

@@ -38,5 +38,5 @@ func mkEngine(t *testing.T) *Engine {
}

func mkJob(t *testing.T, name string, args ...string) *Job {
return mkEngine(t).Job(name, args...)
return newTestEngine(t).Job(name, args...)
}
runtime_test.go (105 changed lines)

@@ -3,6 +3,7 @@ package docker
import (
"bytes"
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils"
"io"

@@ -17,6 +18,7 @@ import (
"syscall"
"testing"
"time"
"net/url"
)

const (

@@ -119,22 +121,19 @@ func init() {
}

func setupBaseImage() {
config := &DaemonConfig{
Root: unitTestStoreBase,
AutoRestart: false,
BridgeIface: unitTestNetworkBridge,
}
runtime, err := NewRuntimeFromDirectory(config)
eng, err := engine.New(unitTestStoreBase)
if err != nil {
log.Fatalf("Can't initialize engine at %s: %s", unitTestStoreBase, err)
}
job := eng.Job("initapi")
job.Setenv("Root", unitTestStoreBase)
job.SetenvBool("Autorestart", false)
job.Setenv("BridgeIface", unitTestNetworkBridge)
if err := job.Run(); err != nil {
log.Fatalf("Unable to create a runtime for tests:", err)
}

// Create the "Server"
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
runtime := srv.runtime

// If the unit test is not found, try to download it.
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {

@@ -150,18 +149,22 @@ func spawnGlobalDaemon() {
utils.Debugf("Global runtime already exists. Skipping.")
return
}
globalRuntime = mkRuntime(log.New(os.Stderr, "", 0))
srv := &Server{
runtime: globalRuntime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
t := log.New(os.Stderr, "", 0)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
globalRuntime = srv.runtime

// Spawn a Daemon
go func() {
utils.Debugf("Spawning global daemon for integration tests")
if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
log.Fatalf("Unable to spawn the test daemon:", err)
listenURL := &url.URL{
Scheme: testDaemonProto,
Host: testDaemonAddr,
}
job := eng.Job("serveapi", listenURL.String())
job.SetenvBool("Logging", os.Getenv("DEBUG") != "")
if err := job.Run(); err != nil {
log.Fatalf("Unable to spawn the test daemon: %s", err)
}
}()
// Give some time to ListenAndServer to actually start

@@ -181,7 +184,7 @@ func GetTestImage(runtime *Runtime) *Image {
return image
}
}
log.Fatalf("Test image %v not found", unitTestImageID)
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.graph.Root, imgs)
return nil
}

@@ -643,20 +646,17 @@ func TestReloadContainerLinks(t *testing.T) {
}

func TestDefaultContainerName(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
srv := &Server{runtime: runtime}

config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}

shortId, _, err := srv.ContainerCreate(config, "some_name")
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name"))
containerID := container.ID

if container.Name != "/some_name" {

@@ -680,20 +680,17 @@ func TestDefaultContainerName(t *testing.T) {
}

func TestRandomContainerName(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
srv := &Server{runtime: runtime}

config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}

shortId, _, err := srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
container := runtime.Get(createTestContainer(eng, config, t))
containerID := container.ID

if container.Name == "" {

@@ -717,20 +714,17 @@ func TestRandomContainerName(t *testing.T) {
}

func TestLinkChildContainer(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
srv := &Server{runtime: runtime}

config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}

shortId, _, err := srv.ContainerCreate(config, "/webapp")
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))

webapp, err := runtime.GetByName("/webapp")
if err != nil {

@@ -746,12 +740,7 @@ func TestLinkChildContainer(t *testing.T) {
t.Fatal(err)
}

shortId, _, err = srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}

childContainer := runtime.Get(shortId)
childContainer := runtime.Get(createTestContainer(eng, config, t))

if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err)

@@ -768,20 +757,17 @@ func TestLinkChildContainer(t *testing.T) {
}

func TestGetAllChildren(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
srv := &Server{runtime: runtime}

config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}

shortId, _, err := srv.ContainerCreate(config, "/webapp")
if err != nil {
t.Fatal(err)
}
container := runtime.Get(shortId)
container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp"))

webapp, err := runtime.GetByName("/webapp")
if err != nil {

@@ -797,12 +783,7 @@ func TestGetAllChildren(t *testing.T) {
t.Fatal(err)
}

shortId, _, err = srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}

childContainer := runtime.Get(shortId)
childContainer := runtime.Get(createTestContainer(eng, config, t))

if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil {
t.Fatal(err)
server.go (158 changed lines)

@@ -33,30 +33,25 @@ func (srv *Server) Close() error {
}

func init() {
engine.Register("serveapi", JobServeApi)
engine.Register("initapi", jobInitApi)
}

func JobServeApi(job *engine.Job) string {
srv, err := NewServer(ConfigFromJob(job))
// jobInitApi runs the remote api server `srv` as a daemon,
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
func jobInitApi(job *engine.Job) string {
job.Logf("Creating server")
srv, err := NewServer(job.Eng, ConfigFromJob(job))
if err != nil {
return err.Error()
}
defer srv.Close()
if err := srv.Daemon(); err != nil {
return err.Error()
if srv.runtime.config.Pidfile != "" {
job.Logf("Creating pidfile")
if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
log.Fatal(err)
}
}
return "0"
}

// Daemon runs the remote api server `srv` as a daemon,
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
func (srv *Server) Daemon() error {
if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
log.Fatal(err)
}
defer utils.RemovePidFile(srv.runtime.config.Pidfile)

job.Logf("Setting up signal traps")
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
go func() {

@@ -66,8 +61,22 @@ func (srv *Server) Daemon() error {
srv.Close()
os.Exit(0)
}()
job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
return err.Error()
}
if err := job.Eng.Register("start", srv.ContainerStart); err != nil {
return err.Error()
}
if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil {
return err.Error()
}
return "0"
}

protoAddrs := srv.runtime.config.ProtoAddresses

func (srv *Server) ListenAndServe(job *engine.Job) string {
protoAddrs := job.Args
chErrors := make(chan error, len(protoAddrs))
for _, protoAddr := range protoAddrs {
protoAddrParts := strings.SplitN(protoAddr, "://", 2)

@@ -81,19 +90,20 @@ func (srv *Server) Daemon() error {
log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
}
default:
return fmt.Errorf("Invalid protocol format.")
return "Invalid protocol format."
}
go func() {
chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, true)
// FIXME: merge Server.ListenAndServe with ListenAndServe
chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, job.GetenvBool("Logging"))
}()
}
for i := 0; i < len(protoAddrs); i += 1 {
err := <-chErrors
if err != nil {
return err
return err.Error()
}
}
return nil
return "0"
}

func (srv *Server) DockerVersion() APIVersion {

@@ -1021,33 +1031,43 @@ func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Write
return nil
}

func (srv *Server) ContainerCreate(config *Config, name string) (string, []string, error) {
if config.Memory != 0 && config.Memory < 524288 {
return "", nil, fmt.Errorf("Minimum memory limit allowed is 512k")
func (srv *Server) ContainerCreate(job *engine.Job) string {
var name string
if len(job.Args) == 1 {
name = job.Args[0]
} else if len(job.Args) > 1 {
return fmt.Sprintf("Usage: %s ", job.Name)
}
var config Config
if err := job.ExportEnv(&config); err != nil {
return err.Error()
}
if config.Memory != 0 && config.Memory < 524288 {
return "Minimum memory limit allowed is 512k"
}

if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
config.Memory = 0
}

if config.Memory > 0 && !srv.runtime.capabilities.SwapLimit {
config.MemorySwap = -1
}
container, buildWarnings, err := srv.runtime.Create(config, name)
container, buildWarnings, err := srv.runtime.Create(&config, name)
if err != nil {
if srv.runtime.graph.IsNotExist(err) {

_, tag := utils.ParseRepositoryTag(config.Image)
if tag == "" {
tag = DEFAULTTAG
}

return "", nil, fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag)
return fmt.Sprintf("No such image: %s (tag: %s)", config.Image, tag)
}
return "", nil, err
return err.Error()
}
srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
return container.ID, buildWarnings, nil
job.Printf("%s\n", container.ID)
for _, warning := range buildWarnings {
job.Errorf("%s\n", warning)
}
return "0"
}

func (srv *Server) ContainerRestart(name string, t int) error {

@@ -1322,7 +1342,6 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
return fmt.Errorf("No such container: %s", name)
}

// Register links
if hostConfig != nil && hostConfig.Links != nil {
for _, l := range hostConfig.Links {
parts, err := parseLink(l)

@@ -1336,7 +1355,6 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
if child == nil {
return fmt.Errorf("Could not get container for %s", parts["name"])
}

if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
return err
}

@@ -1352,41 +1370,57 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
return nil
}

func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
func (srv *Server) ContainerStart(job *engine.Job) string {
if len(job.Args) < 1 {
return fmt.Sprintf("Usage: %s container_id", job.Name)
}
name := job.Args[0]
runtime := srv.runtime
container := runtime.Get(name)

if hostConfig != nil {
for _, bind := range hostConfig.Binds {
splitBind := strings.Split(bind, ":")
source := splitBind[0]

// refuse to bind mount "/" to the container
if source == "/" {
return fmt.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
}

// ensure the source exists on the host
_, err := os.Stat(source)
if err != nil && os.IsNotExist(err) {
return fmt.Errorf("Invalid bind mount '%s' : source doesn't exist", bind)
}
}
}

if container == nil {
return fmt.Errorf("No such container: %s", name)
return fmt.Sprintf("No such container: %s", name)
}
if hostConfig != nil {
container.hostConfig = hostConfig
// If no environment was set, then no hostconfig was passed.
if len(job.Environ()) > 0 {
var hostConfig HostConfig
if err := job.ExportEnv(&hostConfig); err != nil {
return err.Error()
}
// Validate the HostConfig binds. Make sure that:
// 1) the source of a bind mount isn't /
// The bind mount "/:/foo" isn't allowed.
// 2) Check that the source exists
// The source to be bind mounted must exist.
for _, bind := range hostConfig.Binds {
splitBind := strings.Split(bind, ":")
source := splitBind[0]

// refuse to bind mount "/" to the container
if source == "/" {
return fmt.Sprintf("Invalid bind mount '%s' : source can't be '/'", bind)
}

// ensure the source exists on the host
_, err := os.Stat(source)
if err != nil && os.IsNotExist(err) {
return fmt.Sprintf("Invalid bind mount '%s' : source doesn't exist", bind)
}
}
// Register any links from the host config before starting the container
// FIXME: we could just pass the container here, no need to lookup by name again.
if err := srv.RegisterLinks(name, &hostConfig); err != nil {
return err.Error()
}
container.hostConfig = &hostConfig
container.ToDisk()
}
if err := container.Start(); err != nil {
return fmt.Errorf("Cannot start container %s: %s", name, err)
return fmt.Sprintf("Cannot start container %s: %s", name, err)
}
srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))

return nil
return "0"
}

func (srv *Server) ContainerStop(name string, t int) error {

@@ -1537,12 +1571,13 @@ func (srv *Server) ContainerCopy(name string, resource string, out io.Writer) er

}

func NewServer(config *DaemonConfig) (*Server, error) {
func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
runtime, err := NewRuntime(config)
if err != nil {
return nil, err
}
srv := &Server{
Eng: eng,
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),

@@ -1586,4 +1621,5 @@ type Server struct {
events []utils.JSONMessage
listeners map[string]chan utils.JSONMessage
reqFactory *utils.HTTPRequestFactory
Eng *engine.Engine
}
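jobInitApi above is the wiring point: it builds the Server, registers its ContainerCreate, ContainerStart and ListenAndServe methods as the "create", "start" and "serveapi" handlers, and stashes the *Server under the "httpapi.server" hack key for code that still needs direct access. Both docker.go and NewTestEngine then boot the daemon with the same two-step job sequence, sketched below. This is not part of the commit; the paths are hypothetical and actually running it needs the daemon's usual privileges.

package main

import (
    "log"

    _ "github.com/dotcloud/docker" // imported for its init(), which registers the "initapi" handler
    "github.com/dotcloud/docker/engine"
)

func main() {
    eng, err := engine.New("/var/lib/docker-sketch") // hypothetical root
    if err != nil {
        log.Fatal(err)
    }

    // Step 1: "initapi" creates the Server and registers "create",
    // "start" and "serveapi" on this engine.
    initapi := eng.Job("initapi")
    initapi.Setenv("Root", "/var/lib/docker-sketch")
    initapi.SetenvBool("AutoRestart", false)
    if err := initapi.Run(); err != nil {
        log.Fatal(err)
    }

    // Step 2: "serveapi" blocks and serves the HTTP API on its arguments.
    serveapi := eng.Job("serveapi", "unix:///var/run/docker-sketch.sock")
    serveapi.SetenvBool("Logging", true)
    if err := serveapi.Run(); err != nil {
        log.Fatal(err)
    }
}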
server_test.go (115 changed lines)

@@ -80,20 +80,17 @@ func TestContainerTagImageDelete(t *testing.T) {
}

func TestCreateRm(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)

srv := &Server{runtime: runtime}

config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}

id, _, err := srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)

if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))

@@ -110,27 +107,28 @@ func TestCreateRm(t *testing.T) {
}

func TestCreateRmVolumes(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
eng := NewTestEngine(t)

srv := &Server{runtime: runtime}
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)

config, hostConfig, _, err := ParseRun([]string{"-v", "/srv", GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil {
t.Fatal(err)
}

id, _, err := srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)

if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
}

err = srv.ContainerStart(id, hostConfig)
if err != nil {
job := eng.Job("start", id)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}

@@ -149,20 +147,17 @@ func TestCreateRmVolumes(t *testing.T) {
}

func TestCommit(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)

srv := &Server{runtime: runtime}

config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
if err != nil {
t.Fatal(err)
}

id, _, err := srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)

if _, err := srv.ContainerCommit(id, "testrepo", "testtag", "", "", config); err != nil {
t.Fatal(err)

@@ -170,26 +165,27 @@ func TestCommit(t *testing.T) {
}

func TestCreateStartRestartStopStartKillRm(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)

srv := &Server{runtime: runtime}

config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil)
if err != nil {
t.Fatal(err)
}

id, _, err := srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)

if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
}

if err := srv.ContainerStart(id, hostConfig); err != nil {
job := eng.Job("start", id)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}

@@ -201,7 +197,11 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
t.Fatal(err)
}

if err := srv.ContainerStart(id, hostConfig); err != nil {
job = eng.Job("start", id)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}

@@ -221,22 +221,22 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
}

func TestRunWithTooLowMemoryLimit(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)

// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
if _, _, err := (*Server).ContainerCreate(&Server{runtime: runtime},
&Config{
Image: GetTestImage(runtime).ID,
Memory: 524287,
CpuShares: 1000,
Cmd: []string{"/bin/cat"},
},
"",
); err == nil {
job := eng.Job("create")
job.Setenv("Image", GetTestImage(runtime).ID)
job.Setenv("Memory", "524287")
job.Setenv("CpuShares", "1000")
job.SetenvList("Cmd", []string{"/bin/cat"})
var id string
job.StdoutParseString(&id)
if err := job.Run(); err == nil {
t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!")
}

}

func TestContainerTop(t *testing.T) {

@@ -385,9 +385,10 @@ func TestLogEvent(t *testing.T) {
}

func TestRmi(t *testing.T) {
runtime := mkRuntime(t)
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime)
srv := &Server{runtime: runtime}

initialImages, err := srv.Images(false, "")
if err != nil {

@@ -399,14 +400,14 @@ func TestRmi(t *testing.T) {
t.Fatal(err)
}

containerID, _, err := srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
containerID := createTestContainer(eng, config, t)

//To remove
err = srv.ContainerStart(containerID, hostConfig)
if err != nil {
job := eng.Job("start", containerID)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}

@@ -420,14 +421,14 @@ func TestRmi(t *testing.T) {
t.Fatal(err)
}

containerID, _, err = srv.ContainerCreate(config, "")
if err != nil {
t.Fatal(err)
}
containerID = createTestContainer(eng, config, t)

//To remove
err = srv.ContainerStart(containerID, hostConfig)
if err != nil {
job = eng.Job("start", containerID)
if err := job.ImportEnv(hostConfig); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}

@@ -28,6 +28,12 @@ var (
INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary
)

// A common interface to access the Fatal method of
// both testing.B and testing.T.
type Fataler interface {
Fatal(args ...interface{})
}

// ListOpts type
type ListOpts []string

utils_test.go (123 changed lines)

@@ -2,6 +2,7 @@ package docker

import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"

@@ -20,62 +21,98 @@ var globalTestID string

// Create a temporary runtime suitable for unit testing.
// Call t.Fatal() at the first error.
func mkRuntime(f Fataler) *Runtime {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
if globalTestID == "" {
globalTestID = GenerateID()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, callerShortName)
utils.Debugf("prefix = '%s'", prefix)

runtime, err := newTestRuntime(prefix)
func mkRuntime(f utils.Fataler) *Runtime {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
f.Fatal(err)
}
return runtime
config := &DaemonConfig{
Root: root,
AutoRestart: false,
}
r, err := NewRuntimeFromDirectory(config)
if err != nil {
f.Fatal(err)
}
r.UpdateCapabilities(true)
return r
}

// A common interface to access the Fatal method of
// both testing.B and testing.T.
type Fataler interface {
Fatal(args ...interface{})
func createNamedTestContainer(eng *engine.Engine, config *Config, f utils.Fataler, name string) (shortId string) {
job := eng.Job("create", name)
if err := job.ImportEnv(config); err != nil {
f.Fatal(err)
}
job.StdoutParseString(&shortId)
if err := job.Run(); err != nil {
f.Fatal(err)
}
return
}

func newTestRuntime(prefix string) (runtime *Runtime, err error) {
func createTestContainer(eng *engine.Engine, config *Config, f utils.Fataler) (shortId string) {
return createNamedTestContainer(eng, config, f, "")
}

func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *Server {
iSrv := eng.Hack_GetGlobalVar("httpapi.server")
if iSrv == nil {
panic("Legacy server field not set in engine")
}
srv, ok := iSrv.(*Server)
if !ok {
panic("Legacy server field in engine does not cast to *Server")
}
return srv
}

func NewTestEngine(t utils.Fataler) *engine.Engine {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
// Load default plugins
// (This is manually copied and modified from main() until we have a more generic plugin system)
job := eng.Job("initapi")
job.Setenv("Root", root)
job.SetenvBool("AutoRestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
return eng
}

func newTestDirectory(templateDir string) (dir string, err error) {
if globalTestID == "" {
globalTestID = GenerateID()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, getCallerName(2))
if prefix == "" {
prefix = "docker-test-"
}
utils.Debugf("prefix = %s", prefix)
utils.Debugf("newTestRuntime start")
root, err := ioutil.TempDir("", prefix)
defer func() {
utils.Debugf("newTestRuntime: %s", root)
}()
if err != nil {
return nil, err
dir, err = ioutil.TempDir("", prefix)
if err = os.Remove(dir); err != nil {
return
}
if err := os.Remove(root); err != nil {
return nil, err
}
if err := utils.CopyDirectory(unitTestStoreBase, root); err != nil {
return nil, err
if err = utils.CopyDirectory(templateDir, dir); err != nil {
return
}
return
}

config := &DaemonConfig{
Root: root,
AutoRestart: false,
}
runtime, err = NewRuntimeFromDirectory(config)
if err != nil {
return nil, err
}
runtime.UpdateCapabilities(true)
return runtime, nil
func getCallerName(depth int) string {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(depth + 1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
return callerShortName
}

// Write `content` to the file at path `dst`, creating it if necessary,