Mirror of https://github.com/moby/moby.git
b052827e02
When firewalld (or the iptables service) restarts or reloads, all previously added Docker firewall rules are flushed. With firewalld we can react to its Reloaded() [1] D-Bus signal and re-create the firewall rules. When firewalld is restarted (stopped and started) we can also catch the NameOwnerChanged signal [2]. To specify which signals we want to react to, we use AddMatch [3]. Libvirt has been doing this for quite a long time now.

Docker changes firewall rules in basically 3 places:

1) daemon/networkdriver/portmapper/mapper.go - port mappings
   Portmapper fortunately keeps a list of mapped ports, so we can easily re-create the firewall rules on a firewalld restart/reload. The new ReMapAll() function does that.

2) daemon/networkdriver/bridge/driver.go
   When setting up a bridge, basic firewall rules are created. This is done once during start; it is parametrized and tracked nowhere, so how can one know what to set, and how, after a firewalld restart/reload? The only solution that came to my mind is using closures [4], i.e. keeping a list of references to closures (anonymous functions together with a referencing environment) and re-calling them in the same order after a firewalld restart/reload.

3) links/links.go - linking containers
   A link is added in Enable() and removed in Disable(). In Enable() we add a callback function which creates the link; that is OK so far. Ideally we could remove that same function from the list in Disable(). Unfortunately that is not possible AFAICT, because we do not know the reference to that function at that moment, so we can only add a reference to a function which removes the link. That means that after creating and removing a link there are 2 functions in the list, one adding and one removing the link, and after a firewalld restart/reload both are called. It works, but it is far from ideal.

[1] https://jpopelka.fedorapeople.org/firewalld/doc/firewalld.dbus.html#FirewallD1.Signals.Reloaded
[2] http://dbus.freedesktop.org/doc/dbus-specification.html#bus-messages-name-owner-changed
[3] http://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules
[4] https://en.wikipedia.org/wiki/Closure_%28computer_programming%29

Signed-off-by: Jiri Popelka <jpopelka@redhat.com>
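The signal-handling side described above lives elsewhere in the tree, not in this file. As a rough illustration of the mechanism from references [1] and [3], here is a minimal, self-contained sketch of subscribing to firewalld's Reloaded D-Bus signal and re-calling a list of registered closures. It assumes the github.com/godbus/dbus/v5 bindings; the names OnReloaded and onReloaded are illustrative, not Docker's exact implementation.

package main

import (
	"log"

	"github.com/godbus/dbus/v5"
)

// onReloaded holds the closures to be re-run after a firewalld reload;
// callers register them in the order the rules were originally created.
var onReloaded []func()

// OnReloaded registers a closure that re-creates one piece of firewall state.
func OnReloaded(callback func()) {
	onReloaded = append(onReloaded, callback)
}

func main() {
	conn, err := dbus.SystemBus()
	if err != nil {
		log.Fatal(err)
	}
	// AddMatch [3] tells the bus which signals to route to this connection.
	rule := "type='signal',interface='org.fedoraproject.FirewallD1'"
	if call := conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule); call.Err != nil {
		log.Fatal(call.Err)
	}

	signals := make(chan *dbus.Signal, 10)
	conn.Signal(signals)
	for s := range signals {
		// Reloaded [1] fires after a firewalld reload has flushed the rules;
		// re-call every registered closure in its original order.
		if s.Name == "org.fedoraproject.FirewallD1.Reloaded" {
			for _, cb := range onReloaded {
				cb()
			}
		}
	}
}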
201 lines · 5.1 KiB · Go
package portmapper

import (
	"errors"
	"fmt"
	"net"
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/networkdriver/portallocator"
	"github.com/docker/docker/pkg/iptables"
)
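
// mapping records one published port: the protocol, the host and container
// addresses, and the userland proxy serving the mapping.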
type mapping struct {
	proto         string
	userlandProxy UserlandProxy
	host          net.Addr
	container     net.Addr
}
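
// NewProxy is the userland-proxy constructor. It is a package-level variable
// so that tests (or alternative proxy implementations) can swap it out.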
var NewProxy = NewProxyCommand
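
// Errors returned by Map and Unmap.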
var (
	ErrUnknownBackendAddressType = errors.New("unknown container address type not supported")
	ErrPortMappedForIP           = errors.New("port is already mapped to ip")
	ErrPortNotMapped             = errors.New("port is not mapped")
)
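
// PortMapper keeps track of all mapped ports and the iptables chain used to
// forward them, so that mappings can be removed later (Unmap) or re-applied
// after the firewall has been flushed (ReMapAll).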
type PortMapper struct {
	chain *iptables.Chain

	// keys are of the form "ip:port/proto" (see getKey)
	currentMappings map[string]*mapping
	lock            sync.Mutex

	Allocator *portallocator.PortAllocator
}
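
// New returns a PortMapper backed by the default port allocator.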
func New() *PortMapper {
	return NewWithPortAllocator(portallocator.New())
}
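
// NewWithPortAllocator returns a PortMapper that draws host ports from the
// given allocator.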
func NewWithPortAllocator(allocator *portallocator.PortAllocator) *PortMapper {
	return &PortMapper{
		currentMappings: make(map[string]*mapping),
		Allocator:       allocator,
	}
}
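
// SetIptablesChain sets the chain that forwarding rules are written to.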
func (pm *PortMapper) SetIptablesChain(c *iptables.Chain) {
	pm.chain = c
}
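
// Map allocates hostPort (or an ephemeral port, if hostPort is 0) on hostIP,
// inserts an iptables forwarding rule, and starts a userland proxy between the
// host address and the container address. It returns the host address actually
// bound, undoing all intermediate steps on failure.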
func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err error) {
	pm.lock.Lock()
	defer pm.lock.Unlock()

	var (
		m                 *mapping
		proto             string
		allocatedHostPort int
		proxy             UserlandProxy
	)

	switch container.(type) {
	case *net.TCPAddr:
		proto = "tcp"
		if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil {
			return nil, err
		}

		m = &mapping{
			proto:     proto,
			host:      &net.TCPAddr{IP: hostIP, Port: allocatedHostPort},
			container: container,
		}

		proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port)
	case *net.UDPAddr:
		proto = "udp"
		if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil {
			return nil, err
		}

		m = &mapping{
			proto:     proto,
			host:      &net.UDPAddr{IP: hostIP, Port: allocatedHostPort},
			container: container,
		}

		proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port)
	default:
		return nil, ErrUnknownBackendAddressType
	}

	// release the allocated port on any further error during return.
	defer func() {
		if err != nil {
			pm.Allocator.ReleasePort(hostIP, proto, allocatedHostPort)
		}
	}()

	key := getKey(m.host)
	if _, exists := pm.currentMappings[key]; exists {
		return nil, ErrPortMappedForIP
	}

	containerIP, containerPort := getIPAndPort(m.container)
	if err := pm.forward(iptables.Append, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
		return nil, err
	}

	cleanup := func() error {
		// need to undo the iptables rules before we return
		proxy.Stop()
		pm.forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
		if err := pm.Allocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil {
			return err
		}

		return nil
	}

	if err := proxy.Start(); err != nil {
		if err := cleanup(); err != nil {
			return nil, fmt.Errorf("Error during port allocation cleanup: %v", err)
		}
		return nil, err
	}
	m.userlandProxy = proxy
	pm.currentMappings[key] = m
	return m.host, nil
}

// ReMapAll re-applies all current port mappings, e.g. after a firewalld
// restart/reload has flushed the iptables rules.
func (pm *PortMapper) ReMapAll() {
	logrus.Debugln("Re-applying all port mappings.")
	for _, data := range pm.currentMappings {
		containerIP, containerPort := getIPAndPort(data.container)
		hostIP, hostPort := getIPAndPort(data.host)
		if err := pm.forward(iptables.Append, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
			logrus.Errorf("Error on iptables add: %s", err)
		}
	}
}
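
// Unmap stops the userland proxy for the given host address, deletes the
// iptables forwarding rule, and releases the host port back to the allocator.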
func (pm *PortMapper) Unmap(host net.Addr) error {
	pm.lock.Lock()
	defer pm.lock.Unlock()

	key := getKey(host)
	data, exists := pm.currentMappings[key]
	if !exists {
		return ErrPortNotMapped
	}

	data.userlandProxy.Stop()

	delete(pm.currentMappings, key)

	containerIP, containerPort := getIPAndPort(data.container)
	hostIP, hostPort := getIPAndPort(data.host)
	if err := pm.forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
		logrus.Errorf("Error on iptables delete: %s", err)
	}

	switch a := host.(type) {
	case *net.TCPAddr:
		return pm.Allocator.ReleasePort(a.IP, "tcp", a.Port)
	case *net.UDPAddr:
		return pm.Allocator.ReleasePort(a.IP, "udp", a.Port)
	}
	return nil
}
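
// getKey formats an address as "ip:port/proto", the key used in
// currentMappings.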
func getKey(a net.Addr) string {
	switch t := a.(type) {
	case *net.TCPAddr:
		return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp")
	case *net.UDPAddr:
		return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp")
	}
	return ""
}
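
// getIPAndPort extracts the IP and port from a TCP or UDP address.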
func getIPAndPort(a net.Addr) (net.IP, int) {
	switch t := a.(type) {
	case *net.TCPAddr:
		return t.IP, t.Port
	case *net.UDPAddr:
		return t.IP, t.Port
	}
	return nil, 0
}
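
// forward adds or removes (per action) the iptables rule that forwards
// sourceIP:sourcePort on the host to containerIP:containerPort. It is a
// no-op until SetIptablesChain has been called.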
func (pm *PortMapper) forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
	if pm.chain == nil {
		return nil
	}
	return pm.chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort)
}