moby/moby: libnetwork/portallocator/portallocator.go

package portallocator

import (
	"errors"
	"fmt"
	"net"
	"sync"

	"github.com/sirupsen/logrus"
)

type ipMapping map[string]protoMap

var (
	// ErrAllPortsAllocated is returned when no more ports are available.
	ErrAllPortsAllocated = errors.New("all ports are allocated")
	// ErrUnknownProtocol is returned when an unknown protocol was specified.
	ErrUnknownProtocol = errors.New("unknown protocol")

	defaultIP = net.ParseIP("0.0.0.0")
	once     sync.Once
	instance *PortAllocator
)

// ErrPortAlreadyAllocated is returned when a requested port is already in use.
type ErrPortAlreadyAllocated struct {
	ip   string
	port int
}

func newErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated {
	return ErrPortAlreadyAllocated{
		ip:   ip,
		port: port,
	}
}

// IP returns the address to which the used port is associated.
func (e ErrPortAlreadyAllocated) IP() string {
	return e.ip
}

// Port returns the value of the already used port.
func (e ErrPortAlreadyAllocated) Port() int {
	return e.port
}

// IPPort returns the address and the port in the form ip:port.
func (e ErrPortAlreadyAllocated) IPPort() string {
	return fmt.Sprintf("%s:%d", e.ip, e.port)
}

// Error implements the error interface.
func (e ErrPortAlreadyAllocated) Error() string {
	return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port)
}
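
// A caller-side sketch (illustrative, not part of this file) showing how the
// concrete error type can be inspected when a fixed-port request fails:
//
//	_, err := portallocator.Get().RequestPort(nil, "tcp", 80)
//	var allocErr portallocator.ErrPortAlreadyAllocated
//	if errors.As(err, &allocErr) {
//		fmt.Println("port busy at", allocErr.IPPort())
//	}
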
type (
	// PortAllocator manages the transport ports database
	PortAllocator struct {
		mutex sync.Mutex
		ipMap ipMapping
		Begin int
		End   int
	}
	portRange struct {
		begin int
		end   int
		last  int
	}
	portMap struct {
		p            map[int]struct{}
		defaultRange string
		portRanges   map[string]*portRange
	}
	protoMap map[string]*portMap
)
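
// The structures above nest per IP, then per protocol, then per port. As an
// illustrative sketch (not actual code in this package), the state after
// reserving TCP port 80 on the default address conceptually looks like:
//
//	p.ipMap["0.0.0.0"]["tcp"].p[80] = struct{}{}
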
// Get returns the default instance of PortAllocator.
func Get() *PortAllocator {
	// PortAllocator is a singleton.
	// Note: the long-term solution is for each PortAllocator to have access
	// to the OS, so that it can keep an up-to-date view of the OS port
	// allocation. When that happens, the singleton behavior will be removed.
	// Clients do not need to worry about this; they will not see a change
	// in behavior.
	once.Do(func() {
		instance = newInstance()
	})
	return instance
}
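
// A caller-side sketch (illustrative): every caller shares the same
// allocator, so a port reserved through one reference is unavailable
// through any other:
//
//	a := portallocator.Get()
//	b := portallocator.Get()
//	// a == b: a port requested via a is seen as busy via b
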
func newInstance() *PortAllocator {
	start, end, err := getDynamicPortRange()
	if err != nil {
		logrus.WithError(err).Infof("falling back to default port range %d-%d", defaultPortRangeStart, defaultPortRangeEnd)
		start, end = defaultPortRangeStart, defaultPortRangeEnd
	}
	return &PortAllocator{
		ipMap: ipMapping{},
		Begin: start,
		End:   end,
	}
}

// RequestPort requests a new port from the global ports pool for the
// specified ip and proto. If port is 0, it returns the first free port.
// Otherwise it checks port availability in the proto's pool and returns
// that port, or an error if the port is already busy.
func (p *PortAllocator) RequestPort(ip net.IP, proto string, port int) (int, error) {
	return p.RequestPortInRange(ip, proto, port, port)
}
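
// A minimal caller-side sketch (illustrative, not part of this file),
// assuming the singleton from Get(); a nil IP is treated as 0.0.0.0:
//
//	pa := portallocator.Get()
//	port, err := pa.RequestPort(nil, "tcp", 8080) // reserve a fixed port
//	if err != nil {
//		return err // e.g. ErrPortAlreadyAllocated if 8080 is taken
//	}
//	defer pa.ReleasePort(nil, "tcp", port)
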
// RequestPortInRange requests a new port from the global ports pool for the
// specified ip and proto. If portStart and portEnd are 0, it returns the
// first free port in the default ephemeral range. If portStart != portEnd,
// it returns the first free port in the requested range. Otherwise
// (portStart == portEnd) it checks port availability in the requested
// proto's port-pool and returns that port, or an error if the port is
// already busy.
func (p *PortAllocator) RequestPortInRange(ip net.IP, proto string, portStart, portEnd int) (int, error) {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	if proto != "tcp" && proto != "udp" && proto != "sctp" {
		return 0, ErrUnknownProtocol
	}

	if ip == nil {
		ip = defaultIP
	}
	ipstr := ip.String()
	protomap, ok := p.ipMap[ipstr]
	if !ok {
		protomap = protoMap{
			"tcp":  p.newPortMap(),
			"udp":  p.newPortMap(),
			"sctp": p.newPortMap(),
		}
		p.ipMap[ipstr] = protomap
	}
	mapping := protomap[proto]
	if portStart > 0 && portStart == portEnd {
		if _, ok := mapping.p[portStart]; !ok {
			mapping.p[portStart] = struct{}{}
			return portStart, nil
		}
		return 0, newErrPortAlreadyAllocated(ipstr, portStart)
	}

	port, err := mapping.findPort(portStart, portEnd)
	if err != nil {
		return 0, err
	}
	return port, nil
}
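
// A range-request sketch (illustrative values): ask for the first free TCP
// port between 10000 and 10005 on 127.0.0.1, or the first free port in the
// default ephemeral range by passing 0, 0:
//
//	pa := portallocator.Get()
//	port, err := pa.RequestPortInRange(net.ParseIP("127.0.0.1"), "tcp", 10000, 10005)
//	ephemeral, err := pa.RequestPortInRange(nil, "tcp", 0, 0)
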
// ReleasePort releases a port from the global ports pool for the specified
// ip and proto.
func (p *PortAllocator) ReleasePort(ip net.IP, proto string, port int) error {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	if ip == nil {
		ip = defaultIP
	}
	protomap, ok := p.ipMap[ip.String()]
	if !ok {
		return nil
	}
	// Guard against an unknown proto, which would otherwise yield a nil
	// *portMap and panic on the delete below.
	if pm, ok := protomap[proto]; ok {
		delete(pm.p, port)
	}
	return nil
}

func (p *PortAllocator) newPortMap() *portMap {
	defaultKey := getRangeKey(p.Begin, p.End)
	pm := &portMap{
		p:            map[int]struct{}{},
		defaultRange: defaultKey,
		portRanges: map[string]*portRange{
			defaultKey: newPortRange(p.Begin, p.End),
		},
	}
	return pm
}

// ReleaseAll releases all ports for all ips.
func (p *PortAllocator) ReleaseAll() error {
	p.mutex.Lock()
	p.ipMap = ipMapping{}
	p.mutex.Unlock()
	return nil
}

func getRangeKey(portStart, portEnd int) string {
	return fmt.Sprintf("%d-%d", portStart, portEnd)
}

func newPortRange(portStart, portEnd int) *portRange {
	return &portRange{
		begin: portStart,
		end:   portEnd,
		last:  portEnd,
	}
}

func (pm *portMap) getPortRange(portStart, portEnd int) (*portRange, error) {
	var key string
	if portStart == 0 && portEnd == 0 {
		key = pm.defaultRange
	} else {
		key = getRangeKey(portStart, portEnd)
		if portStart == portEnd ||
			portStart == 0 || portEnd == 0 ||
			portEnd < portStart {
			return nil, fmt.Errorf("invalid port range: %s", key)
		}
	}

	// Return the existing port range, if already known.
	if pr, exists := pm.portRanges[key]; exists {
		return pr, nil
	}

	// Otherwise create a new port range.
	pr := newPortRange(portStart, portEnd)
	pm.portRanges[key] = pr
	return pr, nil
}
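
// How getPortRange interprets its arguments (illustrative):
//
//	pm.getPortRange(0, 0)         // default ephemeral range
//	pm.getPortRange(10000, 10005) // range "10000-10005", created on demand
//	pm.getPortRange(10005, 10000) // error: invalid port range
//	pm.getPortRange(8080, 8080)   // error: single ports are handled by RequestPortInRange
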
func (pm *portMap) findPort(portStart, portEnd int) (int, error) {
	pr, err := pm.getPortRange(portStart, portEnd)
	if err != nil {
		return 0, err
	}

	// Scan the range starting just after the last allocated port, wrapping
	// around at the end, so that consecutive requests rotate through the
	// range instead of immediately reusing released ports.
	port := pr.last
	for i := 0; i <= pr.end-pr.begin; i++ {
		port++
		if port > pr.end {
			port = pr.begin
		}
		if _, ok := pm.p[port]; !ok {
			pm.p[port] = struct{}{}
			pr.last = port
			return port, nil
		}
	}
	return 0, ErrAllPortsAllocated
}
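
// A worked sketch of the rotation (illustrative values; the composite
// literal below is only usable from inside this package): with Begin=10000
// and End=10002, ports are handed out in rotation until the range is
// exhausted:
//
//	pa := &PortAllocator{ipMap: ipMapping{}, Begin: 10000, End: 10002}
//	p1, _ := pa.RequestPortInRange(nil, "tcp", 0, 0)  // 10000 (scan wraps past last == 10002)
//	p2, _ := pa.RequestPortInRange(nil, "tcp", 0, 0)  // 10001
//	p3, _ := pa.RequestPortInRange(nil, "tcp", 0, 0)  // 10002
//	_, err := pa.RequestPortInRange(nil, "tcp", 0, 0) // err == ErrAllPortsAllocated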