Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
DESCRIPTION: As part of bringing libnetwork bridge driver features to parity with docker/daemon/network/driver/bridge features (Issue #46), this commit addresses the bridge.RequestPort() API. Currently docker/api/server.go needs to get hold of the port allocator in order to reserve a transport port which will be used by the http server on the host machine, so that portallocator does not give out that port when queried by portmapper as part of network driver operations.

ISSUE: In the current docker implementation, server.go directly accesses portmapper, and then portallocator, from the bridge pkg by calling bridge.RequestPort(). This also forces that function to trigger portmapper initialization (in case bridge init() was not executed yet), while the portmapper life cycle should only be controlled by the bridge network driver. We cannot maintain this behavior with libnetwork, as it violates the modularization of networking code which libnetwork is bringing in.

FIX: Make portallocator a singleton. Now both docker core and portmapper code can initialize it and get the one and only instance (the change in docker core code will happen when docker migrates to use libnetwork), given it is being used for host-specific needs.

NOTE: The long-term fix is having multiple portallocator instances (so no more singleton), each capable of staying in sync with the OS regarding current port allocations. When this change comes, no change would be required on portallocator's client side; changes will be confined to the portallocator package.

Signed-off-by: Alessandro Boch <aboch@docker.com>
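The FIX described above amounts to a classic process-wide singleton. A minimal sketch of the idea, assuming a sync.Once guard; the struct fields and initialization details here are illustrative, not the actual libnetwork portallocator code:

package portallocator

import "sync"

// PortAllocator hands out host transport ports; the real allocation
// state (per-ip, per-proto maps) is elided in this sketch.
type PortAllocator struct {
	mu sync.Mutex
	// ... allocation state ...
}

var (
	once     sync.Once
	instance *PortAllocator
)

// New returns the one process-wide PortAllocator. Both docker core
// (reserving the API server's transport port) and portmapper (allocating
// host ports for NAT) share the same allocation state, so a port reserved
// by one client is never handed out to the other.
func New() *PortAllocator {
	once.Do(func() {
		instance = &PortAllocator{}
	})
	return instance
}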
198 lines
5.2 KiB
Go
package portmapper

import (
	"errors"
	"fmt"
	"net"
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/iptables"
	"github.com/docker/libnetwork/pkg/portallocator"
)

type mapping struct {
	proto         string
	userlandProxy userlandProxy
	host          net.Addr
	container     net.Addr
}

var newProxy = newProxyCommand

var (
	// ErrUnknownBackendAddressType refers to an unknown container or unsupported address type
	ErrUnknownBackendAddressType = errors.New("unknown container address type not supported")
	// ErrPortMappedForIP refers to a port already mapped to an ip address
	ErrPortMappedForIP = errors.New("port is already mapped to ip")
	// ErrPortNotMapped refers to an unmapped port
	ErrPortNotMapped = errors.New("port is not mapped")
)

// PortMapper manages the network address translation
type PortMapper struct {
	chain *iptables.Chain

	// udp:ip:port
	currentMappings map[string]*mapping
	lock            sync.Mutex

	Allocator *portallocator.PortAllocator
}

// New returns a new instance of PortMapper
func New() *PortMapper {
	return NewWithPortAllocator(portallocator.New())
}

// NewWithPortAllocator returns a new instance of PortMapper which will use the specified PortAllocator
func NewWithPortAllocator(allocator *portallocator.PortAllocator) *PortMapper {
	return &PortMapper{
		currentMappings: make(map[string]*mapping),
		Allocator:       allocator,
	}
}

// SetIptablesChain sets the specified chain into portmapper
func (pm *PortMapper) SetIptablesChain(c *iptables.Chain) {
	pm.chain = c
}

// Map maps the specified container transport address to the host's network address and transport port
func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err error) {
	pm.lock.Lock()
	defer pm.lock.Unlock()

	var (
		m                 *mapping
		proto             string
		allocatedHostPort int
		proxy             userlandProxy
	)

	switch container.(type) {
	case *net.TCPAddr:
		proto = "tcp"
		if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil {
			return nil, err
		}

		m = &mapping{
			proto:     proto,
			host:      &net.TCPAddr{IP: hostIP, Port: allocatedHostPort},
			container: container,
		}

		proxy = newProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port)
	case *net.UDPAddr:
		proto = "udp"
		if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil {
			return nil, err
		}

		m = &mapping{
			proto:     proto,
			host:      &net.UDPAddr{IP: hostIP, Port: allocatedHostPort},
			container: container,
		}

		proxy = newProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port)
	default:
		return nil, ErrUnknownBackendAddressType
	}

	// release the allocated port on any further error during return.
	defer func() {
		if err != nil {
			pm.Allocator.ReleasePort(hostIP, proto, allocatedHostPort)
		}
	}()

	key := getKey(m.host)
	if _, exists := pm.currentMappings[key]; exists {
		return nil, ErrPortMappedForIP
	}

	containerIP, containerPort := getIPAndPort(m.container)
	if err := pm.forward(iptables.Append, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
		return nil, err
	}

	cleanup := func() error {
		// need to undo the iptables rules before we return
		proxy.Stop()
		pm.forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
		if err := pm.Allocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil {
			return err
		}

		return nil
	}

	if err := proxy.Start(); err != nil {
		if err := cleanup(); err != nil {
			return nil, fmt.Errorf("Error during port allocation cleanup: %v", err)
		}
		return nil, err
	}
	m.userlandProxy = proxy
	pm.currentMappings[key] = m
	return m.host, nil
}

// Unmap removes stored mapping for the specified host transport address
func (pm *PortMapper) Unmap(host net.Addr) error {
	pm.lock.Lock()
	defer pm.lock.Unlock()

	key := getKey(host)
	data, exists := pm.currentMappings[key]
	if !exists {
		return ErrPortNotMapped
	}

	data.userlandProxy.Stop()

	delete(pm.currentMappings, key)

	containerIP, containerPort := getIPAndPort(data.container)
	hostIP, hostPort := getIPAndPort(data.host)
	if err := pm.forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
		logrus.Errorf("Error on iptables delete: %s", err)
	}

	switch a := host.(type) {
	case *net.TCPAddr:
		return pm.Allocator.ReleasePort(a.IP, "tcp", a.Port)
	case *net.UDPAddr:
		return pm.Allocator.ReleasePort(a.IP, "udp", a.Port)
	}
	return nil
}

func getKey(a net.Addr) string {
	switch t := a.(type) {
	case *net.TCPAddr:
		return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp")
	case *net.UDPAddr:
		return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp")
	}
	return ""
}

func getIPAndPort(a net.Addr) (net.IP, int) {
	switch t := a.(type) {
	case *net.TCPAddr:
		return t.IP, t.Port
	case *net.UDPAddr:
		return t.IP, t.Port
	}
	return nil, 0
}

func (pm *PortMapper) forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
	if pm.chain == nil {
		return nil
	}
	return pm.chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort)
}
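For reference, a sketch of how a caller might drive this API. The import path and addresses are assumptions for illustration; passing hostPort 0 to request any free host port follows portallocator's dynamic-allocation convention, and a real run needs iptables and the userland proxy binary available:

package main

import (
	"log"
	"net"

	"github.com/docker/libnetwork/portmapper" // path assumed for illustration
)

func main() {
	pm := portmapper.New()

	// Map some free host port (hostPort 0) to the container's tcp/80.
	container := &net.TCPAddr{IP: net.ParseIP("172.17.0.2"), Port: 80}
	host, err := pm.Map(container, net.ParseIP("0.0.0.0"), 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("mapped %s -> %s", host, container)

	// Tear the mapping down: stops the userland proxy, deletes the
	// iptables rule, and releases the host port back to the allocator.
	if err := pm.Unmap(host); err != nil {
		log.Fatal(err)
	}
}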