When a container was destroyed, it was possible for flows in
conntrack to be left behind on the host.
If a flow is present in the conntrack table, packet processing
skips the POSTROUTING chain of iptables and uses the information
in conntrack to do the translation.
For this reason, long-lived flows created towards a container
that is later destroyed can affect new flows coming into the
host, creating erroneous conditions where traffic cannot reach
new containers.
The fix takes care of cleaning up those flows when a container
is destroyed.
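
The cleanup itself is not part of this diff (only the test below
is); conceptually it amounts to deleting, via netlink, the
conntrack entries that reference the container's address when the
endpoint is removed. A rough sketch of that idea, assuming the
github.com/vishvananda/netlink package and a hypothetical
purgeConntrackState helper (not the exact code of the fix):

package main

import (
	"fmt"
	"net"
	"syscall"

	"github.com/vishvananda/netlink"
)

// purgeConntrackState is a hypothetical helper: it removes every IPv4
// conntrack entry whose reply tuple references the given IP, which is
// roughly what has to happen when the container owning that IP is destroyed.
func purgeConntrackState(ip net.IP) (uint, error) {
	filter := &netlink.ConntrackFilter{}
	// Match flows whose reply side carries the container/host address.
	if err := filter.AddIP(netlink.ConntrackReplyAnyIP, ip); err != nil {
		return 0, err
	}
	// Delete the matching entries from the IPv4 conntrack table.
	return netlink.ConntrackDeleteFilter(netlink.ConntrackTable, syscall.AF_INET, filter)
}

func main() {
	// Example: purge flows left behind for the host binding address used
	// by the test below (requires root / CAP_NET_ADMIN).
	n, err := purgeConntrackState(net.ParseIP("192.168.10.1"))
	if err != nil {
		fmt.Println("purge failed:", err)
		return
	}
	fmt.Printf("deleted %d conntrack entries\n", n)
}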

The test in this commit reproduces the condition where a UDP flow
is established towards a container that is then destroyed, and
verifies that the flow is gone once the container has been
destroyed.

Signed-off-by: Flavio Crisciani <flavio.crisciani@docker.com>

@@ -12,6 +12,7 @@ import (
	"os"
	"path/filepath"
	"strings"
	"syscall"
	"time"

	"github.com/docker/docker/api/types"
@@ -1789,3 +1790,56 @@ func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromBridge(c *check.C) {
	_, _, err := dockerCmdWithError("network", "disconnect", network, name)
	c.Assert(err, check.IsNil)
}

// TestConntrackFlowsLeak covers the failure scenario of ticket: https://github.com/docker/docker/issues/8795
// Validates that conntrack is correctly cleaned once a container is destroyed
func (s *DockerNetworkSuite) TestConntrackFlowsLeak(c *check.C) {
	testRequires(c, IsAmd64, DaemonIsLinux, Network)

	// Create a new network
	dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind")
	assertNwIsAvailable(c, "testbind")

	// Launch the server, this will remain listening on an exposed port and reply to any request in a ping/pong fashion
	cmd := "while true; do echo hello | nc -w 1 -lu 8080; done"
	_, _, err := dockerCmdWithError("run", "-d", "--name", "server", "--net", "testbind", "-p", "8080:8080/udp", "appropriate/nc", "sh", "-c", cmd)
	c.Assert(err, check.IsNil)

	// Launch a container client, here the objective is to create a flow that is natted in order to expose the bug
	cmd = "echo world | nc -q 1 -u 192.168.10.1 8080"
	_, _, err = dockerCmdWithError("run", "-d", "--name", "client", "--net=host", "appropriate/nc", "sh", "-c", cmd)
	c.Assert(err, check.IsNil)

	// Get all the flows using netlink
	flows, err := netlink.ConntrackTableList(netlink.ConntrackTable, syscall.AF_INET)
	c.Assert(err, check.IsNil)
	var flowMatch int
	for _, flow := range flows {
		// count only the flows that we are interested in, skipping others that can be laying around the host
		if flow.Forward.Protocol == syscall.IPPROTO_UDP &&
			flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) &&
			flow.Forward.DstPort == 8080 {
			flowMatch++
		}
	}
	// The client should have created only 1 flow
	c.Assert(flowMatch, checker.Equals, 1)

	// Now delete the server, this will trigger the conntrack cleanup
	err = deleteContainer("server")
	c.Assert(err, checker.IsNil)

	// Fetch again all the flows and validate that there is no server flow in the conntrack laying around
	flows, err = netlink.ConntrackTableList(netlink.ConntrackTable, syscall.AF_INET)
	c.Assert(err, check.IsNil)
	flowMatch = 0
	for _, flow := range flows {
		if flow.Forward.Protocol == syscall.IPPROTO_UDP &&
			flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) &&
			flow.Forward.DstPort == 8080 {
			flowMatch++
		}
	}
	// All the flows have to be gone
	c.Assert(flowMatch, checker.Equals, 0)
}