Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
Move runtime and container into sub pkg
Docker-DCO-1.1-Signed-off-by: Michael Crosby <michael@crosbymichael.com> (github: crosbymichael)
parent 01b6b2be73
commit 36c3614fdd
15 changed files with 251 additions and 186 deletions
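The change is mechanical but wide: the container and runtime sources move from package docker into a new runtime package, and every caller that used to reach into Runtime's unexported fields (repositories, graph, config, sysInfo, driver, execDriver, volumes, containerGraph) now goes through exported accessor methods added in this commit. A minimal sketch of that accessor pattern, using placeholder types rather than Docker's real graph.TagStore and graph.Graph:

    package runtime

    // Placeholder types; in Docker these are *graph.TagStore and *graph.Graph.
    type TagStore struct{}
    type Graph struct{}

    // Runtime keeps its fields unexported, so code that now lives outside
    // this package (server.go, buildfile.go) can no longer touch them directly.
    type Runtime struct {
    	repositories *TagStore
    	graph        *Graph
    }

    // Exported accessors replace the old field accesses, e.g.
    // srv.runtime.repositories.LookupImage(name) becomes
    // srv.runtime.Repositories().LookupImage(name).
    func (r *Runtime) Repositories() *TagStore { return r.repositories }
    func (r *Runtime) Graph() *Graph           { return r.graph }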
buildfile.go (17 changed lines)
@@ -10,6 +10,7 @@ import (
 "github.com/dotcloud/docker/auth"
 "github.com/dotcloud/docker/registry"
 "github.com/dotcloud/docker/runconfig"
+"github.com/dotcloud/docker/runtime"
 "github.com/dotcloud/docker/utils"
 "io"
 "io/ioutil"
@@ -34,7 +35,7 @@ type BuildFile interface {
 }

 type buildFile struct {
-runtime *Runtime
+runtime *runtime.Runtime
 srv *Server

 image string
@@ -74,9 +75,9 @@ func (b *buildFile) clearTmp(containers map[string]struct{}) {
 }

 func (b *buildFile) CmdFrom(name string) error {
-image, err := b.runtime.repositories.LookupImage(name)
+image, err := b.runtime.Repositories().LookupImage(name)
 if err != nil {
-if b.runtime.graph.IsNotExist(err) {
+if b.runtime.Graph().IsNotExist(err) {
 remote, tag := utils.ParseRepositoryTag(name)
 pullRegistryAuth := b.authConfig
 if len(b.configFile.Configs) > 0 {
@@ -96,7 +97,7 @@ func (b *buildFile) CmdFrom(name string) error {
 if err := job.Run(); err != nil {
 return err
 }
-image, err = b.runtime.repositories.LookupImage(name)
+image, err = b.runtime.Repositories().LookupImage(name)
 if err != nil {
 return err
 }
@@ -110,7 +111,7 @@ func (b *buildFile) CmdFrom(name string) error {
 b.config = image.Config
 }
 if b.config.Env == nil || len(b.config.Env) == 0 {
-b.config.Env = append(b.config.Env, "HOME=/", "PATH="+defaultPathEnv)
+b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv)
 }
 // Process ONBUILD triggers if they exist
 if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
@@ -371,7 +372,7 @@ func (b *buildFile) checkPathForAddition(orig string) error {
 return nil
 }

-func (b *buildFile) addContext(container *Container, orig, dest string, remote bool) error {
+func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error {
 var (
 origPath = path.Join(b.contextPath, orig)
 destPath = path.Join(container.BasefsPath(), dest)
@@ -604,7 +605,7 @@ func (sf *StderrFormater) Write(buf []byte) (int, error) {
 return len(buf), err
 }

-func (b *buildFile) create() (*Container, error) {
+func (b *buildFile) create() (*runtime.Container, error) {
 if b.image == "" {
 return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
 }
@@ -625,7 +626,7 @@ func (b *buildFile) create() (*Container, error) {
 return c, nil
 }

-func (b *buildFile) run(c *Container) error {
+func (b *buildFile) run(c *runtime.Container) error {
 var errCh chan error

 if b.verbose {
(next file; name not shown)

@@ -5,12 +5,12 @@ import (
 "bytes"
 "encoding/json"
 "fmt"
-"github.com/dotcloud/docker"
 "github.com/dotcloud/docker/api"
 "github.com/dotcloud/docker/dockerversion"
 "github.com/dotcloud/docker/engine"
 "github.com/dotcloud/docker/image"
 "github.com/dotcloud/docker/runconfig"
+"github.com/dotcloud/docker/runtime"
 "github.com/dotcloud/docker/utils"
 "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 "io"
@@ -600,7 +600,7 @@ func TestGetContainersByName(t *testing.T) {
 t.Fatal(err)
 }
 assertHttpNotError(r, t)
-outContainer := &docker.Container{}
+outContainer := &runtime.Container{}
 if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil {
 t.Fatal(err)
 }
(next file; name not shown)

@@ -3,11 +3,11 @@ package docker
 import (
 "bufio"
 "fmt"
-"github.com/dotcloud/docker"
 "github.com/dotcloud/docker/api"
 "github.com/dotcloud/docker/engine"
 "github.com/dotcloud/docker/image"
 "github.com/dotcloud/docker/pkg/term"
+"github.com/dotcloud/docker/runtime"
 "github.com/dotcloud/docker/utils"
 "io"
 "io/ioutil"
@@ -36,7 +36,7 @@ func closeWrap(args ...io.Closer) error {
 return nil
 }

-func setRaw(t *testing.T, c *docker.Container) *term.State {
+func setRaw(t *testing.T, c *runtime.Container) *term.State {
 pty, err := c.GetPtyMaster()
 if err != nil {
 t.Fatal(err)
@@ -48,7 +48,7 @@ func setRaw(t *testing.T, c *docker.Container) *term.State {
 return state
 }

-func unsetRaw(t *testing.T, c *docker.Container, state *term.State) {
+func unsetRaw(t *testing.T, c *runtime.Container, state *term.State) {
 pty, err := c.GetPtyMaster()
 if err != nil {
 t.Fatal(err)
@@ -56,8 +56,8 @@ func unsetRaw(t *testing.T, c *docker.Container, state *term.State) {
 term.RestoreTerminal(pty.Fd(), state)
 }

-func waitContainerStart(t *testing.T, timeout time.Duration) *docker.Container {
-var container *docker.Container
+func waitContainerStart(t *testing.T, timeout time.Duration) *runtime.Container {
+var container *runtime.Container

 setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
 for {
(next file; name not shown)

@@ -3,11 +3,11 @@ package docker
 import (
 "bytes"
 "fmt"
-"github.com/dotcloud/docker"
 "github.com/dotcloud/docker/engine"
 "github.com/dotcloud/docker/image"
 "github.com/dotcloud/docker/nat"
 "github.com/dotcloud/docker/runconfig"
+"github.com/dotcloud/docker/runtime"
 "github.com/dotcloud/docker/sysinit"
 "github.com/dotcloud/docker/utils"
 "io"
@@ -16,7 +16,7 @@ import (
 "net/url"
 "os"
 "path/filepath"
-"runtime"
+goruntime "runtime"
 "strconv"
 "strings"
 "syscall"
@@ -36,14 +36,14 @@ const (

 var (
 // FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
-globalRuntime *docker.Runtime
+globalRuntime *runtime.Runtime
 globalEngine *engine.Engine
 startFds int
 startGoroutines int
 )

 // FIXME: nuke() is deprecated by Runtime.Nuke()
-func nuke(runtime *docker.Runtime) error {
+func nuke(runtime *runtime.Runtime) error {
 return runtime.Nuke()
 }

@@ -120,7 +120,7 @@ func init() {

 // Create the "global runtime" with a long-running daemon for integration tests
 spawnGlobalDaemon()
-startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
+startFds, startGoroutines = utils.GetTotalUsedFds(), goruntime.NumGoroutine()
 }

 func setupBaseImage() {
@@ -173,7 +173,7 @@ func spawnGlobalDaemon() {

 // FIXME: test that ImagePull(json=true) send correct json output

-func GetTestImage(runtime *docker.Runtime) *image.Image {
+func GetTestImage(runtime *runtime.Runtime) *image.Image {
 imgs, err := runtime.Graph().Map()
 if err != nil {
 log.Fatalf("Unable to get the test image: %s", err)
@@ -357,7 +357,7 @@ func TestGet(t *testing.T) {

 }

-func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) {
+func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *runtime.Container, string) {
 var (
 err error
 id string
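The goruntime "runtime" alias in the import hunk above (and again in server.go below) is a side effect of the move: once a file imports github.com/dotcloud/docker/runtime, the identifier runtime is taken, so the standard library package of the same name needs a rename on import. A small self-contained illustration:

    package main

    import (
    	"fmt"
    	goruntime "runtime" // aliased so the name "runtime" stays free for another package
    )

    func main() {
    	// Same call the test init() makes; only the package alias changed.
    	fmt.Println("goroutines:", goruntime.NumGoroutine())
    }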
(next file; name not shown)

@@ -18,6 +18,7 @@ import (
 "github.com/dotcloud/docker/builtins"
 "github.com/dotcloud/docker/engine"
 "github.com/dotcloud/docker/runconfig"
+"github.com/dotcloud/docker/runtime"
 "github.com/dotcloud/docker/utils"
 )

@@ -27,7 +28,7 @@ import (

 // Create a temporary runtime suitable for unit testing.
 // Call t.Fatal() at the first error.
-func mkRuntime(f utils.Fataler) *docker.Runtime {
+func mkRuntime(f utils.Fataler) *runtime.Runtime {
 eng := newTestEngine(f, false, "")
 return mkRuntimeFromEngine(eng, f)
 // FIXME:
@@ -139,7 +140,7 @@ func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) {
 }
 }

-func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container {
+func getContainer(eng *engine.Engine, id string, t utils.Fataler) *runtime.Container {
 runtime := mkRuntimeFromEngine(eng, t)
 c := runtime.Get(id)
 if c == nil {
@@ -160,14 +161,14 @@ func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server {
 return srv
 }

-func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime {
+func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *runtime.Runtime {
 iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime")
 if iRuntime == nil {
 panic("Legacy runtime field not set in engine")
 }
-runtime, ok := iRuntime.(*docker.Runtime)
+runtime, ok := iRuntime.(*runtime.Runtime)
 if !ok {
-panic("Legacy runtime field in engine does not cast to *docker.Runtime")
+panic("Legacy runtime field in engine does not cast to *runtime.Runtime")
 }
 return runtime
 }
@@ -249,7 +250,7 @@ func readFile(src string, t *testing.T) (content string) {
 // dynamically replaced by the current test image.
 // The caller is responsible for destroying the container.
 // Call t.Fatal() at the first error.
-func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *runconfig.HostConfig, error) {
+func mkContainer(r *runtime.Runtime, args []string, t *testing.T) (*runtime.Container, *runconfig.HostConfig, error) {
 config, hc, _, err := runconfig.Parse(args, nil)
 defer func() {
 if err != nil && t != nil {
@@ -280,7 +281,7 @@ func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Contai
 // and return its standard output as a string.
 // The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image.
 // If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
-func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) {
+func runContainer(eng *engine.Engine, r *runtime.Runtime, args []string, t *testing.T) (output string, err error) {
 defer func() {
 if err != nil && t != nil {
 t.Fatal(err)
(next file; name not shown)

@@ -1,4 +1,4 @@
-package docker
+package runtime

 import (
 "encoding/json"
@@ -24,7 +24,7 @@ import (
 "time"
 )

-const defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

 var (
 ErrNotATTY = errors.New("The PTY is not a file")
@@ -174,7 +174,7 @@ func (container *Container) ToDisk() (err error) {
 if err != nil {
 return
 }
-return container.writeHostConfig()
+return container.WriteHostConfig()
 }

 func (container *Container) readHostConfig() error {
@@ -193,7 +193,7 @@ func (container *Container) readHostConfig() error {
 return json.Unmarshal(data, container.hostConfig)
 }

-func (container *Container) writeHostConfig() (err error) {
+func (container *Container) WriteHostConfig() (err error) {
 data, err := json.Marshal(container.hostConfig)
 if err != nil {
 return
@@ -451,7 +451,7 @@ func (container *Container) Start() (err error) {
 // Setup environment
 env := []string{
 "HOME=/",
-"PATH=" + defaultPathEnv,
+"PATH=" + DefaultPathEnv,
 "HOSTNAME=" + container.Config.Hostname,
 }

@@ -693,7 +693,7 @@ func (container *Container) allocateNetwork() error {
 return err
 }
 container.Config.PortSpecs = nil
-if err := container.writeHostConfig(); err != nil {
+if err := container.WriteHostConfig(); err != nil {
 return err
 }
 }
@@ -751,7 +751,7 @@ func (container *Container) allocateNetwork() error {
 }
 bindings[port] = binding
 }
-container.writeHostConfig()
+container.WriteHostConfig()

 container.NetworkSettings.Ports = bindings

@@ -850,7 +850,7 @@ func (container *Container) cleanup() {
 }
 }

-func (container *Container) kill(sig int) error {
+func (container *Container) KillSig(sig int) error {
 container.Lock()
 defer container.Unlock()

@@ -866,7 +866,7 @@ func (container *Container) Kill() error {
 }

 // 1. Send SIGKILL
-if err := container.kill(9); err != nil {
+if err := container.KillSig(9); err != nil {
 return err
 }

@@ -891,10 +891,10 @@ func (container *Container) Stop(seconds int) error {
 }

 // 1. Send a SIGTERM
-if err := container.kill(15); err != nil {
+if err := container.KillSig(15); err != nil {
 utils.Debugf("Error sending kill SIGTERM: %s", err)
 log.Print("Failed to send SIGTERM to the process, force killing")
-if err := container.kill(9); err != nil {
+if err := container.KillSig(9); err != nil {
 return err
 }
 }
@@ -1141,3 +1141,21 @@ func (container *Container) GetPtyMaster() (*os.File, error) {
 }
 return ttyConsole.Master(), nil
 }
+
+func (container *Container) HostConfig() *runconfig.HostConfig {
+return container.hostConfig
+}
+
+func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) {
+container.hostConfig = hostConfig
+}
+
+func (container *Container) DisableLink(name string) {
+if container.activeLinks != nil {
+if link, exists := container.activeLinks[name]; exists {
+link.Disable()
+} else {
+utils.Debugf("Could not find active link for %s", name)
+}
+}
+}
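The new HostConfig, SetHostConfig and DisableLink methods exist because server.go can no longer poke at the container's unexported hostConfig and activeLinks fields across the package boundary (the ContainerDestroy and ContainerStart hunks in server.go below switch to them). A self-contained sketch of DisableLink's behavior, with a placeholder Link type standing in for Docker's links implementation:

    package main

    import "fmt"

    // Link is a placeholder for the real link type held in activeLinks.
    type Link struct{ name string }

    func (l *Link) Disable() { fmt.Println("disabled link:", l.name) }

    type Container struct {
    	activeLinks map[string]*Link // unexported, hence the exported wrapper below
    }

    // DisableLink mirrors the method added in this commit: look up an active
    // link by name, disable it, and log when no such link exists.
    func (c *Container) DisableLink(name string) {
    	if c.activeLinks != nil {
    		if link, exists := c.activeLinks[name]; exists {
    			link.Disable()
    		} else {
    			fmt.Printf("Could not find active link for %s\n", name)
    		}
    	}
    }

    func main() {
    	c := &Container{activeLinks: map[string]*Link{"db": {name: "db"}}}
    	c.DisableLink("db")  // disables the registered link
    	c.DisableLink("web") // logs that no such link exists
    }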
(next file; name not shown)

@@ -1,4 +1,4 @@
-package docker
+package runtime

 import (
 "github.com/dotcloud/docker/nat"
@@ -132,14 +132,14 @@ func TestParseNetworkOptsUdp(t *testing.T) {
 }

 func TestGetFullName(t *testing.T) {
-name, err := getFullName("testing")
+name, err := GetFullContainerName("testing")
 if err != nil {
 t.Fatal(err)
 }
 if name != "/testing" {
 t.Fatalf("Expected /testing got %s", name)
 }
-if _, err := getFullName(""); err == nil {
+if _, err := GetFullContainerName(""); err == nil {
 t.Fatal("Error should not be nil")
 }
 }
(next file; name not shown)

@@ -1,4 +1,4 @@
-package docker
+package runtime

 import (
 "container/list"
@@ -40,7 +40,7 @@ import (
 const MaxImageDepth = 127

 var (
-defaultDns = []string{"8.8.8.8", "8.8.4.4"}
+DefaultDns = []string{"8.8.8.8", "8.8.4.4"}
 validContainerNameChars = `[a-zA-Z0-9_.-]`
 validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
 )
@@ -54,7 +54,7 @@ type Runtime struct {
 idIndex *utils.TruncIndex
 sysInfo *sysinfo.SysInfo
 volumes *graph.Graph
-srv *Server
+srv Server
 eng *engine.Engine
 config *daemonconfig.Config
 containerGraph *graphdb.Database
@@ -500,8 +500,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe
 }

 if len(config.Dns) == 0 && len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
-//"WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns
-runtime.config.Dns = defaultDns
+runtime.config.Dns = DefaultDns
 }

 // If custom dns exists, then create a resolv.conf for the container
@@ -578,7 +577,7 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
 return img, nil
 }

-func getFullName(name string) (string, error) {
+func GetFullContainerName(name string) (string, error) {
 if name == "" {
 return "", fmt.Errorf("Container name cannot be empty")
 }
@@ -589,7 +588,7 @@ func getFullName(name string) (string, error) {
 }

 func (runtime *Runtime) GetByName(name string) (*Container, error) {
-fullName, err := getFullName(name)
+fullName, err := GetFullContainerName(name)
 if err != nil {
 return nil, err
 }
@@ -605,7 +604,7 @@ func (runtime *Runtime) GetByName(name string) (*Container, error) {
 }

 func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
-name, err := getFullName(name)
+name, err := GetFullContainerName(name)
 if err != nil {
 return nil, err
 }
@@ -892,6 +891,42 @@ func (runtime *Runtime) Graph() *graph.Graph {
 return runtime.graph
 }

+func (runtime *Runtime) Repositories() *graph.TagStore {
+return runtime.repositories
+}
+
+func (runtime *Runtime) Config() *daemonconfig.Config {
+return runtime.config
+}
+
+func (runtime *Runtime) SystemConfig() *sysinfo.SysInfo {
+return runtime.sysInfo
+}
+
+func (runtime *Runtime) SystemInitPath() string {
+return runtime.sysInitPath
+}
+
+func (runtime *Runtime) GraphDriver() graphdriver.Driver {
+return runtime.driver
+}
+
+func (runtime *Runtime) ExecutionDriver() execdriver.Driver {
+return runtime.execDriver
+}
+
+func (runtime *Runtime) Volumes() *graph.Graph {
+return runtime.volumes
+}
+
+func (runtime *Runtime) ContainerGraph() *graphdb.Database {
+return runtime.containerGraph
+}
+
+func (runtime *Runtime) SetServer(server Server) {
+runtime.srv = server
+}
+
 // History is a convenience type for storing a list of containers,
 // ordered by creation date.
 type History []*Container
runtime/server.go (new file, 9 lines)

@@ -0,0 +1,9 @@
+package runtime
+
+import (
+"github.com/dotcloud/docker/utils"
+)
+
+type Server interface {
+LogEvent(action, id, from string) *utils.JSONMessage
+}
|
|||
package docker
|
||||
package runtime
|
||||
|
||||
import "sort"
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
package docker
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"fmt"
|
runtime/utils.go (new file, 44 lines)

@@ -0,0 +1,44 @@
+package runtime
+
+import (
+"github.com/dotcloud/docker/nat"
+"github.com/dotcloud/docker/pkg/namesgenerator"
+"github.com/dotcloud/docker/runconfig"
+)
+
+func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error {
+if config.PortSpecs != nil {
+ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs)
+if err != nil {
+return err
+}
+config.PortSpecs = nil
+if len(bindings) > 0 {
+if hostConfig == nil {
+hostConfig = &runconfig.HostConfig{}
+}
+hostConfig.PortBindings = bindings
+}
+
+if config.ExposedPorts == nil {
+config.ExposedPorts = make(nat.PortSet, len(ports))
+}
+for k, v := range ports {
+config.ExposedPorts[k] = v
+}
+}
+return nil
+}
+
+type checker struct {
+runtime *Runtime
+}
+
+func (c *checker) Exists(name string) bool {
+return c.runtime.containerGraph.Exists("/" + name)
+}
+
+// Generate a random and unique name
+func generateRandomName(runtime *Runtime) (string, error) {
+return namesgenerator.GenerateRandomName(&checker{runtime})
+}
(next file; name not shown)

@@ -1,4 +1,4 @@
-package docker
+package runtime

 import (
 "fmt"
server.go (181 changed lines)
@@ -13,6 +13,7 @@ import (
 "github.com/dotcloud/docker/pkg/graphdb"
 "github.com/dotcloud/docker/registry"
 "github.com/dotcloud/docker/runconfig"
+"github.com/dotcloud/docker/runtime"
 "github.com/dotcloud/docker/utils"
 "io"
 "io/ioutil"
@@ -24,7 +25,7 @@ import (
 "os/signal"
 "path"
 "path/filepath"
-"runtime"
+goruntime "runtime"
 "strconv"
 "strings"
 "sync"
@@ -41,9 +42,9 @@ func InitServer(job *engine.Job) engine.Status {
 if err != nil {
 return job.Error(err)
 }
-if srv.runtime.config.Pidfile != "" {
+if srv.runtime.Config().Pidfile != "" {
 job.Logf("Creating pidfile")
-if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
+if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil {
 // FIXME: do we need fatal here instead of returning a job error?
 log.Fatal(err)
 }
@@ -54,7 +55,7 @@ func InitServer(job *engine.Job) engine.Status {
 go func() {
 sig := <-c
 log.Printf("Received signal '%v', exiting\n", sig)
-utils.RemovePidFile(srv.runtime.config.Pidfile)
+utils.RemovePidFile(srv.runtime.Config().Pidfile)
 srv.Close()
 os.Exit(0)
 }()
@@ -181,10 +182,10 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
 if err := container.Kill(); err != nil {
 return job.Errorf("Cannot kill container %s: %s", name, err)
 }
-srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image))
+srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image))
 } else {
 // Otherwise, just send the requested signal
-if err := container.kill(int(sig)); err != nil {
+if err := container.KillSig(int(sig)); err != nil {
 return job.Errorf("Cannot kill container %s: %s", name, err)
 }
 // FIXME: Add event for signals
@@ -293,7 +294,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
 return job.Errorf("%s: %s", name, err)
 }
 // FIXME: factor job-specific LogEvent to engine.Job.Run()
-srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image))
+srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image))
 return engine.StatusOK
 }
 return job.Errorf("No such container: %s", name)
@@ -318,7 +319,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status {

 utils.Debugf("Serializing %s", name)

-rootRepo, err := srv.runtime.repositories.Get(name)
+rootRepo, err := srv.runtime.Repositories().Get(name)
 if err != nil {
 return job.Error(err)
 }
@@ -494,7 +495,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
 return job.Error(err)
 }
 if repoName != "" {
-srv.runtime.repositories.Set(repoName, tag, id, false)
+srv.runtime.Repositories().Set(repoName, tag, id, false)
 }
 return engine.StatusOK
 }
@@ -555,7 +556,7 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status {

 for imageName, tagMap := range repositories {
 for tag, address := range tagMap {
-if err := srv.runtime.repositories.Set(imageName, tag, address, true); err != nil {
+if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil {
 return job.Error(err)
 }
 }
@@ -588,13 +589,13 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
 return err
 }
 if img.Parent != "" {
-if !srv.runtime.graph.Exists(img.Parent) {
+if !srv.runtime.Graph().Exists(img.Parent) {
 if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
 return err
 }
 }
 }
-if err := srv.runtime.graph.Register(imageJson, layer, img); err != nil {
+if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil {
 return err
 }
 }
@@ -650,7 +651,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 sf := utils.NewStreamFormatter(job.GetenvBool("json"))

 out := utils.NewWriteFlusher(job.Stdout)
-img, err := srv.runtime.repositories.LookupImage(name)
+img, err := srv.runtime.Repositories().LookupImage(name)
 if err != nil {
 return job.Error(err)
 }
@@ -661,7 +662,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 }
 defer file.Body.Close()

-config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo)
+config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig())
 if err != nil {
 return job.Error(err)
 }
@@ -685,7 +686,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
 }

 func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
-images, _ := srv.runtime.graph.Map()
+images, _ := srv.runtime.Graph().Map()
 if images == nil {
 return engine.StatusOK
 }
@@ -709,7 +710,7 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status {

 reporefs := make(map[string][]string)

-for name, repository := range srv.runtime.repositories.Repositories {
+for name, repository := range srv.runtime.Repositories().Repositories {
 for tag, id := range repository {
 reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
 }
@@ -728,22 +729,22 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
 err error
 )
 if job.GetenvBool("all") {
-allImages, err = srv.runtime.graph.Map()
+allImages, err = srv.runtime.Graph().Map()
 } else {
-allImages, err = srv.runtime.graph.Heads()
+allImages, err = srv.runtime.Graph().Heads()
 }
 if err != nil {
 return job.Error(err)
 }
 lookup := make(map[string]*engine.Env)
-for name, repository := range srv.runtime.repositories.Repositories {
+for name, repository := range srv.runtime.Repositories().Repositories {
 if job.Getenv("filter") != "" {
 if match, _ := path.Match(job.Getenv("filter"), name); !match {
 continue
 }
 }
 for tag, id := range repository {
-image, err := srv.runtime.graph.Get(id)
+image, err := srv.runtime.Graph().Get(id)
 if err != nil {
 log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
 continue
@@ -793,7 +794,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
 }

 func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
-images, _ := srv.runtime.graph.Map()
+images, _ := srv.runtime.Graph().Map()
 var imgcount int
 if images == nil {
 imgcount = 0
@@ -809,21 +810,21 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
 initPath := utils.DockerInitPath("")
 if initPath == "" {
 // if that fails, we'll just return the path from the runtime
-initPath = srv.runtime.sysInitPath
+initPath = srv.runtime.SystemInitPath()
 }

 v := &engine.Env{}
 v.SetInt("Containers", len(srv.runtime.List()))
 v.SetInt("Images", imgcount)
-v.Set("Driver", srv.runtime.driver.String())
-v.SetJson("DriverStatus", srv.runtime.driver.Status())
-v.SetBool("MemoryLimit", srv.runtime.sysInfo.MemoryLimit)
-v.SetBool("SwapLimit", srv.runtime.sysInfo.SwapLimit)
-v.SetBool("IPv4Forwarding", !srv.runtime.sysInfo.IPv4ForwardingDisabled)
+v.Set("Driver", srv.runtime.GraphDriver().String())
+v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status())
+v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit)
+v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit)
+v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled)
 v.SetBool("Debug", os.Getenv("DEBUG") != "")
 v.SetInt("NFd", utils.GetTotalUsedFds())
-v.SetInt("NGoroutines", runtime.NumGoroutine())
-v.Set("ExecutionDriver", srv.runtime.execDriver.Name())
+v.SetInt("NGoroutines", goruntime.NumGoroutine())
+v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name())
 v.SetInt("NEventsListener", len(srv.listeners))
 v.Set("KernelVersion", kernelVersion)
 v.Set("IndexServerAddress", auth.IndexServerAddress())
@@ -840,13 +841,13 @@ func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
 return job.Errorf("Usage: %s IMAGE", job.Name)
 }
 name := job.Args[0]
-foundImage, err := srv.runtime.repositories.LookupImage(name)
+foundImage, err := srv.runtime.Repositories().LookupImage(name)
 if err != nil {
 return job.Error(err)
 }

 lookupMap := make(map[string][]string)
-for name, repository := range srv.runtime.repositories.Repositories {
+for name, repository := range srv.runtime.Repositories().Repositories {
 for tag, id := range repository {
 // If the ID already has a reverse lookup, do not update it unless for "latest"
 if _, exists := lookupMap[id]; !exists {
@@ -891,7 +892,7 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
 if !container.State.IsRunning() {
 return job.Errorf("Container %s is not running", name)
 }
-pids, err := srv.runtime.execDriver.GetPidsForContainer(container.ID)
+pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID)
 if err != nil {
 return job.Error(err)
 }
@@ -984,7 +985,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
 outs := engine.NewTable("Created", 0)

 names := map[string][]string{}
-srv.runtime.containerGraph.Walk("/", func(p string, e *graphdb.Entity) error {
+srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
 names[e.ID()] = append(names[e.ID()], p)
 return nil
 }, -1)
@@ -1009,7 +1010,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
 out := &engine.Env{}
 out.Set("Id", container.ID)
 out.SetList("Names", names[container.ID])
-out.Set("Image", srv.runtime.repositories.ImageName(container.Image))
+out.Set("Image", srv.runtime.Repositories().ImageName(container.Image))
 if len(container.Args) > 0 {
 out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, strings.Join(container.Args, " ")))
 } else {
@@ -1067,7 +1068,7 @@ func (srv *Server) ImageTag(job *engine.Job) engine.Status {
 if len(job.Args) == 3 {
 tag = job.Args[2]
 }
-if err := srv.runtime.repositories.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
+if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
 return job.Error(err)
 }
 return engine.StatusOK
@@ -1092,7 +1093,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 }
 defer srv.poolRemove("pull", "layer:"+id)

-if !srv.runtime.graph.Exists(id) {
+if !srv.runtime.Graph().Exists(id) {
 out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
 imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
 if err != nil {
@@ -1114,7 +1115,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 return err
 }
 defer layer.Close()
-if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
+if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
 out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
 return err
 }
@@ -1249,11 +1250,11 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
 if askedTag != "" && tag != askedTag {
 continue
 }
-if err := srv.runtime.repositories.Set(localName, tag, id, true); err != nil {
+if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil {
 return err
 }
 }
-if err := srv.runtime.repositories.Save(); err != nil {
+if err := srv.runtime.Repositories().Save(); err != nil {
 return err
 }

@@ -1374,7 +1375,7 @@ func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[stri

 tagsByImage[id] = append(tagsByImage[id], tag)

-for img, err := srv.runtime.graph.Get(id); img != nil; img, err = img.GetParent() {
+for img, err := srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() {
 if err != nil {
 return nil, nil, err
 }
@@ -1481,7 +1482,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName

 func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
 out = utils.NewWriteFlusher(out)
-jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
+jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json"))
 if err != nil {
 return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
 }
@@ -1500,7 +1501,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 return "", err
 }

-layerData, err := srv.runtime.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out)
+layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
 if err != nil {
 return "", fmt.Errorf("Failed to generate layer archive: %s", err)
 }
@@ -1552,17 +1553,17 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status {
 return job.Error(err)
 }

-img, err := srv.runtime.graph.Get(localName)
+img, err := srv.runtime.Graph().Get(localName)
 r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
 if err2 != nil {
 return job.Error(err2)
 }

 if err != nil {
-reposLen := len(srv.runtime.repositories.Repositories[localName])
+reposLen := len(srv.runtime.Repositories().Repositories[localName])
 job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
 // If it fails, try to get the repository
-if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists {
+if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists {
 if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil {
 return job.Error(err)
 }
@@ -1618,13 +1619,13 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
 defer progressReader.Close()
 archive = progressReader
 }
-img, err := srv.runtime.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil)
+img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
 if err != nil {
 return job.Error(err)
 }
 // Optionally register the image at REPO/TAG
 if repo != "" {
-if err := srv.runtime.repositories.Set(repo, tag, img.ID, true); err != nil {
+if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil {
 return job.Error(err)
 }
 }
@@ -1643,11 +1644,11 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 if config.Memory != 0 && config.Memory < 524288 {
 return job.Errorf("Minimum memory limit allowed is 512k")
 }
-if config.Memory > 0 && !srv.runtime.sysInfo.MemoryLimit {
+if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit {
 job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 config.Memory = 0
 }
-if config.Memory > 0 && !srv.runtime.sysInfo.SwapLimit {
+if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit {
 job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 config.MemorySwap = -1
 }
@@ -1655,14 +1656,14 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 if err != nil {
 return job.Error(err)
 }
-if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
-job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", defaultDns)
-config.Dns = defaultDns
+if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.Config().Dns) == 0 && utils.CheckLocalDns(resolvConf) {
+job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", runtime.DefaultDns)
+config.Dns = runtime.DefaultDns
 }

 container, buildWarnings, err := srv.runtime.Create(config, name)
 if err != nil {
-if srv.runtime.graph.IsNotExist(err) {
+if srv.runtime.Graph().IsNotExist(err) {
 _, tag := utils.ParseRepositoryTag(config.Image)
 if tag == "" {
 tag = graph.DEFAULTTAG
@@ -1671,10 +1672,10 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 }
 return job.Error(err)
 }
-if !container.Config.NetworkDisabled && srv.runtime.sysInfo.IPv4ForwardingDisabled {
+if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled {
 job.Errorf("IPv4 forwarding is disabled.\n")
 }
-srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
+srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image))
 // FIXME: this is necessary because runtime.Create might return a nil container
 // with a non-nil error. This should not happen! Once it's fixed we
 // can remove this workaround.
@@ -1702,7 +1703,7 @@ func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
 if err := container.Restart(int(t)); err != nil {
 return job.Errorf("Cannot restart container %s: %s\n", name, err)
 }
-srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image))
+srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image))
 } else {
 return job.Errorf("No such container: %s\n", name)
 }
@@ -1724,7 +1725,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 if container == nil {
 return job.Errorf("No such link: %s", name)
 }
-name, err := getFullName(name)
+name, err := runtime.GetFullContainerName(name)
 if err != nil {
 job.Error(err)
 }
@@ -1732,21 +1733,17 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 if parent == "/" {
 return job.Errorf("Conflict, cannot remove the default name of the container")
 }
-pe := srv.runtime.containerGraph.Get(parent)
+pe := srv.runtime.ContainerGraph().Get(parent)
 if pe == nil {
 return job.Errorf("Cannot get parent %s for name %s", parent, name)
 }
 parentContainer := srv.runtime.Get(pe.ID())

-if parentContainer != nil && parentContainer.activeLinks != nil {
-if link, exists := parentContainer.activeLinks[n]; exists {
-link.Disable()
-} else {
-utils.Debugf("Could not find active link for %s", name)
-}
+if parentContainer != nil {
+parentContainer.DisableLink(n)
 }

-if err := srv.runtime.containerGraph.Delete(name); err != nil {
+if err := srv.runtime.ContainerGraph().Delete(name); err != nil {
 return job.Error(err)
 }
 return engine.StatusOK
@@ -1765,13 +1762,13 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 if err := srv.runtime.Destroy(container); err != nil {
 return job.Errorf("Cannot destroy container %s: %s", name, err)
 }
-srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image))
+srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image))

 if removeVolume {
 var (
 volumes = make(map[string]struct{})
 binds = make(map[string]struct{})
-usedVolumes = make(map[string]*Container)
+usedVolumes = make(map[string]*runtime.Container)
 )

 // the volume id is always the base of the path
@@ -1780,7 +1777,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 }

 // populate bind map so that they can be skipped and not removed
-for _, bind := range container.hostConfig.Binds {
+for _, bind := range container.HostConfig().Binds {
 source := strings.Split(bind, ":")[0]
 // TODO: refactor all volume stuff, all of it
 // this is very important that we eval the link
@@ -1819,7 +1816,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
 log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
 continue
 }
-if err := srv.runtime.volumes.Delete(volumeId); err != nil {
+if err := srv.runtime.Volumes().Delete(volumeId); err != nil {
 return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
 }
 }
@@ -1841,9 +1838,9 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo
 tag = graph.DEFAULTTAG
 }

-img, err := srv.runtime.repositories.LookupImage(name)
+img, err := srv.runtime.Repositories().LookupImage(name)
 if err != nil {
-if r, _ := srv.runtime.repositories.Get(repoName); r != nil {
+if r, _ := srv.runtime.Repositories().Get(repoName); r != nil {
 return fmt.Errorf("No such image: %s:%s", repoName, tag)
 }
 return fmt.Errorf("No such image: %s", name)
@@ -1854,14 +1851,14 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo
 tag = ""
 }

-byParents, err := srv.runtime.graph.ByParent()
+byParents, err := srv.runtime.Graph().ByParent()
 if err != nil {
 return err
 }

 //If delete by id, see if the id belong only to one repository
 if repoName == "" {
-for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] {
+for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] {
 parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
 if repoName == "" || repoName == parsedRepo {
 repoName = parsedRepo
@@ -1884,7 +1881,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo

 //Untag the current image
 for _, tag := range tags {
-tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag)
+tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag)
 if err != nil {
 return err
 }
@@ -1895,16 +1892,16 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo
 srv.LogEvent("untag", img.ID, "")
 }
 }
-tags = srv.runtime.repositories.ByID()[img.ID]
+tags = srv.runtime.Repositories().ByID()[img.ID]
 if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
 if len(byParents[img.ID]) == 0 {
 if err := srv.canDeleteImage(img.ID); err != nil {
 return err
 }
-if err := srv.runtime.repositories.DeleteAll(img.ID); err != nil {
+if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil {
 return err
 }
-if err := srv.runtime.graph.Delete(img.ID); err != nil {
+if err := srv.runtime.Graph().Delete(img.ID); err != nil {
 return err
 }
 out := &engine.Env{}
@@ -1943,7 +1940,7 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status {

 func (srv *Server) canDeleteImage(imgID string) error {
 for _, container := range srv.runtime.List() {
-parent, err := srv.runtime.repositories.LookupImage(container.Image)
+parent, err := srv.runtime.Repositories().LookupImage(container.Image)
 if err != nil {
 return err
 }
@@ -1963,7 +1960,7 @@ func (srv *Server) canDeleteImage(imgID string) error {
 func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {

 // Retrieve all images
-images, err := srv.runtime.graph.Map()
+images, err := srv.runtime.Graph().Map()
 if err != nil {
 return nil, err
 }
@@ -1980,7 +1977,7 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag
 // Loop on the children of the given image and check the config
 var match *image.Image
 for elem := range imageMap[imgID] {
-img, err := srv.runtime.graph.Get(elem)
+img, err := srv.runtime.Graph().Get(elem)
 if err != nil {
 return nil, err
 }
@@ -1993,7 +1990,7 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag
 return match, nil
 }

-func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
+func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error {
 runtime := srv.runtime

 if hostConfig != nil && hostConfig.Links != nil {
@@ -2017,7 +2014,7 @@ func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.Hos
 // After we load all the links into the runtime
 // set them to nil on the hostconfig
 hostConfig.Links = nil
-if err := container.writeHostConfig(); err != nil {
+if err := container.WriteHostConfig(); err != nil {
 return err
 }
 }
@@ -2065,13 +2062,13 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
 if err := srv.RegisterLinks(container, hostConfig); err != nil {
 return job.Error(err)
 }
-container.hostConfig = hostConfig
+container.SetHostConfig(hostConfig)
 container.ToDisk()
 }
 if err := container.Start(); err != nil {
 return job.Errorf("Cannot start container %s: %s", name, err)
 }
-srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))
+srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image))

 return engine.StatusOK
 }
@@ -2091,7 +2088,7 @@ func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
 if err := container.Stop(int(t)); err != nil {
 return job.Errorf("Cannot stop container %s: %s\n", name, err)
 }
-srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image))
+srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image))
 } else {
 return job.Errorf("No such container: %s\n", name)
 }
@@ -2237,7 +2234,7 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
 return engine.StatusOK
 }

-func (srv *Server) ContainerInspect(name string) (*Container, error) {
+func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) {
 if container := srv.runtime.Get(name); container != nil {
 return container, nil
 }
@@ -2245,7 +2242,7 @@ func (srv *Server) ContainerInspect(name string) (*Container, error) {
 }

 func (srv *Server) ImageInspect(name string) (*image.Image, error) {
-if image, err := srv.runtime.repositories.LookupImage(name); err == nil && image != nil {
+if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil {
 return image, nil
 }
 return nil, fmt.Errorf("No such image: %s", name)
@@ -2280,9 +2277,9 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status {
 return job.Error(errContainer)
 }
 object = &struct {
-*Container
+*runtime.Container
 HostConfig *runconfig.HostConfig
-}{container, container.hostConfig}
+}{container, container.HostConfig()}
 default:
 return job.Errorf("Unknown kind: %s", kind)
 }
@@ -2322,7 +2319,7 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
 }

 func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
-runtime, err := NewRuntime(config, eng)
+runtime, err := runtime.NewRuntime(config, eng)
 if err != nil {
 return nil, err
 }
@@ -2335,7 +2332,7 @@ func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error)
 listeners: make(map[string]chan utils.JSONMessage),
 running: true,
 }
-runtime.srv = srv
+runtime.SetServer(srv)
 return srv, nil
 }

@@ -2403,7 +2400,7 @@ func (srv *Server) Close() error {

 type Server struct {
 sync.RWMutex
-runtime *Runtime
+runtime *runtime.Runtime
 pullingPool map[string]chan struct{}
 pushingPool map[string]chan struct{}
 events []utils.JSONMessage
utils.go (40 changed lines)
@@ -2,9 +2,6 @@ package docker

 import (
 "github.com/dotcloud/docker/archive"
-"github.com/dotcloud/docker/nat"
-"github.com/dotcloud/docker/pkg/namesgenerator"
-"github.com/dotcloud/docker/runconfig"
 "github.com/dotcloud/docker/utils"
 )

@@ -12,45 +9,8 @@ type Change struct {
 archive.Change
 }

-func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error {
-if config.PortSpecs != nil {
-ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs)
-if err != nil {
-return err
-}
-config.PortSpecs = nil
-if len(bindings) > 0 {
-if hostConfig == nil {
-hostConfig = &runconfig.HostConfig{}
-}
-hostConfig.PortBindings = bindings
-}
-
-if config.ExposedPorts == nil {
-config.ExposedPorts = make(nat.PortSet, len(ports))
-}
-for k, v := range ports {
-config.ExposedPorts[k] = v
-}
-}
-return nil
-}
-
 // Links come in the format of
 // name:alias
 func parseLink(rawLink string) (map[string]string, error) {
 return utils.PartParser("name:alias", rawLink)
 }
-
-type checker struct {
-runtime *Runtime
-}
-
-func (c *checker) Exists(name string) bool {
-return c.runtime.containerGraph.Exists("/" + name)
-}
-
-// Generate a random and unique name
-func generateRandomName(runtime *Runtime) (string, error) {
-return namesgenerator.GenerateRandomName(&checker{runtime})
-}