
Merge pull request #35283 from darrenstahlmsft/revendorLibnetwork

Update libnetwork
Commit 074b1fc47b by Sebastiaan van Stijn, 2017-10-26 00:03:33 +02:00, committed by GitHub
38 changed files with 24 additions and 2,427 deletions


@@ -30,7 +30,7 @@ github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
#get libnetwork packages
-github.com/docker/libnetwork 68f1039f172434709a4550fe92e3e058406c74ce
+github.com/docker/libnetwork 72fd7e5495eba86e28012e39b5ed63ef9ca9a97b
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
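
For context on the hunk above: each non-comment line in vendor.conf pins a Go import path to an exact revision (optionally followed by an alternate clone URL), and this PR only moves the libnetwork pin forward; moby's vendoring tool reads these entries and checks each dependency out at the pinned revision under vendor/. The sketch below is illustrative only, not part of moby's tooling, and just shows how such a pin line breaks into its fields:

```go
package main

import (
	"fmt"
	"strings"
)

// pin represents one vendor.conf entry: an import path pinned to a revision,
// with an optional alternate clone URL as a third field.
type pin struct {
	ImportPath string
	Rev        string
	CloneURL   string // optional
}

// parsePin splits a single vendor.conf line into its fields.
// Comment lines (starting with '#') and blank lines yield ok=false.
func parsePin(line string) (p pin, ok bool) {
	line = strings.TrimSpace(line)
	if line == "" || strings.HasPrefix(line, "#") {
		return pin{}, false
	}
	fields := strings.Fields(line)
	if len(fields) < 2 {
		return pin{}, false
	}
	p = pin{ImportPath: fields[0], Rev: fields[1]}
	if len(fields) > 2 {
		p.CloneURL = fields[2]
	}
	return p, true
}

func main() {
	// The updated libnetwork pin from this PR.
	line := "github.com/docker/libnetwork 72fd7e5495eba86e28012e39b5ed63ef9ca9a97b"
	if p, ok := parsePin(line); ok {
		fmt.Printf("%s pinned to %s\n", p.ImportPath, p.Rev)
	}
}
```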


@@ -1,80 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar_test
import (
"archive/tar"
"bytes"
"fmt"
"io"
"log"
"os"
)
func Example() {
// Create a buffer to write our archive to.
buf := new(bytes.Buffer)
// Create a new tar archive.
tw := tar.NewWriter(buf)
// Add some files to the archive.
var files = []struct {
Name, Body string
}{
{"readme.txt", "This archive contains some text files."},
{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
{"todo.txt", "Get animal handling license."},
}
for _, file := range files {
hdr := &tar.Header{
Name: file.Name,
Mode: 0600,
Size: int64(len(file.Body)),
}
if err := tw.WriteHeader(hdr); err != nil {
log.Fatalln(err)
}
if _, err := tw.Write([]byte(file.Body)); err != nil {
log.Fatalln(err)
}
}
// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
log.Fatalln(err)
}
// Open the tar archive for reading.
r := bytes.NewReader(buf.Bytes())
tr := tar.NewReader(r)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
log.Fatalln(err)
}
fmt.Printf("Contents of %s:\n", hdr.Name)
if _, err := io.Copy(os.Stdout, tr); err != nil {
log.Fatalln(err)
}
fmt.Println()
}
// Output:
// Contents of readme.txt:
// This archive contains some text files.
// Contents of gopher.txt:
// Gopher names:
// George
// Geoffrey
// Gonzo
// Contents of todo.txt:
// Get animal handling license.
}

File diff suppressed because it is too large


@@ -1,319 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"math"
"strings"
"testing"
"time"
)
func TestFitsInBase256(t *testing.T) {
vectors := []struct {
in int64
width int
ok bool
}{
{+1, 8, true},
{0, 8, true},
{-1, 8, true},
{1 << 56, 8, false},
{(1 << 56) - 1, 8, true},
{-1 << 56, 8, true},
{(-1 << 56) - 1, 8, false},
{121654, 8, true},
{-9849849, 8, true},
{math.MaxInt64, 9, true},
{0, 9, true},
{math.MinInt64, 9, true},
{math.MaxInt64, 12, true},
{0, 12, true},
{math.MinInt64, 12, true},
}
for _, v := range vectors {
ok := fitsInBase256(v.width, v.in)
if ok != v.ok {
t.Errorf("fitsInBase256(%d, %d): got %v, want %v", v.in, v.width, ok, v.ok)
}
}
}
func TestParseNumeric(t *testing.T) {
vectors := []struct {
in string
want int64
ok bool
}{
// Test base-256 (binary) encoded values.
{"", 0, true},
{"\x80", 0, true},
{"\x80\x00", 0, true},
{"\x80\x00\x00", 0, true},
{"\xbf", (1 << 6) - 1, true},
{"\xbf\xff", (1 << 14) - 1, true},
{"\xbf\xff\xff", (1 << 22) - 1, true},
{"\xff", -1, true},
{"\xff\xff", -1, true},
{"\xff\xff\xff", -1, true},
{"\xc0", -1 * (1 << 6), true},
{"\xc0\x00", -1 * (1 << 14), true},
{"\xc0\x00\x00", -1 * (1 << 22), true},
{"\x87\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true},
{"\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true},
{"\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true},
{"\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true},
{"\x80\x7f\xff\xff\xff\xff\xff\xff\xff", math.MaxInt64, true},
{"\x80\x80\x00\x00\x00\x00\x00\x00\x00", 0, false},
{"\xff\x80\x00\x00\x00\x00\x00\x00\x00", math.MinInt64, true},
{"\xff\x7f\xff\xff\xff\xff\xff\xff\xff", 0, false},
{"\xf5\xec\xd1\xc7\x7e\x5f\x26\x48\x81\x9f\x8f\x9b", 0, false},
// Test base-8 (octal) encoded values.
{"0000000\x00", 0, true},
{" \x0000000\x00", 0, true},
{" \x0000003\x00", 3, true},
{"00000000227\x00", 0227, true},
{"032033\x00 ", 032033, true},
{"320330\x00 ", 0320330, true},
{"0000660\x00 ", 0660, true},
{"\x00 0000660\x00 ", 0660, true},
{"0123456789abcdef", 0, false},
{"0123456789\x00abcdef", 0, false},
{"01234567\x0089abcdef", 342391, true},
{"0123\x7e\x5f\x264123", 0, false},
}
for _, v := range vectors {
var p parser
got := p.parseNumeric([]byte(v.in))
ok := (p.err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("parseNumeric(%q): got parsing failure, want success", v.in)
} else {
t.Errorf("parseNumeric(%q): got parsing success, want failure", v.in)
}
}
if ok && got != v.want {
t.Errorf("parseNumeric(%q): got %d, want %d", v.in, got, v.want)
}
}
}
func TestFormatNumeric(t *testing.T) {
vectors := []struct {
in int64
want string
ok bool
}{
// Test base-256 (binary) encoded values.
{-1, "\xff", true},
{-1, "\xff\xff", true},
{-1, "\xff\xff\xff", true},
{(1 << 0), "0", false},
{(1 << 8) - 1, "\x80\xff", true},
{(1 << 8), "0\x00", false},
{(1 << 16) - 1, "\x80\xff\xff", true},
{(1 << 16), "00\x00", false},
{-1 * (1 << 0), "\xff", true},
{-1*(1<<0) - 1, "0", false},
{-1 * (1 << 8), "\xff\x00", true},
{-1*(1<<8) - 1, "0\x00", false},
{-1 * (1 << 16), "\xff\x00\x00", true},
{-1*(1<<16) - 1, "00\x00", false},
{537795476381659745, "0000000\x00", false},
{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
{-615126028225187231, "0000000\x00", false},
{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
{math.MaxInt64, "0000000\x00", false},
{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "0000000\x00", false},
{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
}
for _, v := range vectors {
var f formatter
got := make([]byte, len(v.want))
f.formatNumeric(got, v.in)
ok := (f.err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("formatNumeric(%d): got formatting failure, want success", v.in)
} else {
t.Errorf("formatNumeric(%d): got formatting success, want failure", v.in)
}
}
if string(got) != v.want {
t.Errorf("formatNumeric(%d): got %q, want %q", v.in, got, v.want)
}
}
}
func TestParsePAXTime(t *testing.T) {
vectors := []struct {
in string
want time.Time
ok bool
}{
{"1350244992.023960108", time.Unix(1350244992, 23960108), true},
{"1350244992.02396010", time.Unix(1350244992, 23960100), true},
{"1350244992.0239601089", time.Unix(1350244992, 23960108), true},
{"1350244992.3", time.Unix(1350244992, 300000000), true},
{"1350244992", time.Unix(1350244992, 0), true},
{"-1.000000001", time.Unix(-1, -1e0+0e0), true},
{"-1.000001", time.Unix(-1, -1e3+0e0), true},
{"-1.001000", time.Unix(-1, -1e6+0e0), true},
{"-1", time.Unix(-1, -0e0+0e0), true},
{"-1.999000", time.Unix(-1, -1e9+1e6), true},
{"-1.999999", time.Unix(-1, -1e9+1e3), true},
{"-1.999999999", time.Unix(-1, -1e9+1e0), true},
{"0.000000001", time.Unix(0, 1e0+0e0), true},
{"0.000001", time.Unix(0, 1e3+0e0), true},
{"0.001000", time.Unix(0, 1e6+0e0), true},
{"0", time.Unix(0, 0e0), true},
{"0.999000", time.Unix(0, 1e9-1e6), true},
{"0.999999", time.Unix(0, 1e9-1e3), true},
{"0.999999999", time.Unix(0, 1e9-1e0), true},
{"1.000000001", time.Unix(+1, +1e0-0e0), true},
{"1.000001", time.Unix(+1, +1e3-0e0), true},
{"1.001000", time.Unix(+1, +1e6-0e0), true},
{"1", time.Unix(+1, +0e0-0e0), true},
{"1.999000", time.Unix(+1, +1e9-1e6), true},
{"1.999999", time.Unix(+1, +1e9-1e3), true},
{"1.999999999", time.Unix(+1, +1e9-1e0), true},
{"-1350244992.023960108", time.Unix(-1350244992, -23960108), true},
{"-1350244992.02396010", time.Unix(-1350244992, -23960100), true},
{"-1350244992.0239601089", time.Unix(-1350244992, -23960108), true},
{"-1350244992.3", time.Unix(-1350244992, -300000000), true},
{"-1350244992", time.Unix(-1350244992, 0), true},
{"", time.Time{}, false},
{"0", time.Unix(0, 0), true},
{"1.", time.Unix(1, 0), true},
{"0.0", time.Unix(0, 0), true},
{".5", time.Time{}, false},
{"-1.3", time.Unix(-1, -3e8), true},
{"-1.0", time.Unix(-1, -0e0), true},
{"-0.0", time.Unix(-0, -0e0), true},
{"-0.1", time.Unix(-0, -1e8), true},
{"-0.01", time.Unix(-0, -1e7), true},
{"-0.99", time.Unix(-0, -99e7), true},
{"-0.98", time.Unix(-0, -98e7), true},
{"-1.1", time.Unix(-1, -1e8), true},
{"-1.01", time.Unix(-1, -1e7), true},
{"-2.99", time.Unix(-2, -99e7), true},
{"-5.98", time.Unix(-5, -98e7), true},
{"-", time.Time{}, false},
{"+", time.Time{}, false},
{"-1.-1", time.Time{}, false},
{"99999999999999999999999999999999999999999999999", time.Time{}, false},
{"0.123456789abcdef", time.Time{}, false},
{"foo", time.Time{}, false},
{"\x00", time.Time{}, false},
{"𝟵𝟴𝟳𝟲𝟱.𝟰𝟯𝟮𝟭𝟬", time.Time{}, false}, // Unicode numbers (U+1D7EC to U+1D7F5)
{"98765﹒43210", time.Time{}, false}, // Unicode period (U+FE52)
}
for _, v := range vectors {
ts, err := parsePAXTime(v.in)
ok := (err == nil)
if v.ok != ok {
if v.ok {
t.Errorf("parsePAXTime(%q): got parsing failure, want success", v.in)
} else {
t.Errorf("parsePAXTime(%q): got parsing success, want failure", v.in)
}
}
if ok && !ts.Equal(v.want) {
t.Errorf("parsePAXTime(%q): got (%ds %dns), want (%ds %dns)",
v.in, ts.Unix(), ts.Nanosecond(), v.want.Unix(), v.want.Nanosecond())
}
}
}
func TestParsePAXRecord(t *testing.T) {
medName := strings.Repeat("CD", 50)
longName := strings.Repeat("AB", 100)
vectors := []struct {
in string
wantRes string
wantKey string
wantVal string
ok bool
}{
{"6 k=v\n\n", "\n", "k", "v", true},
{"19 path=/etc/hosts\n", "", "path", "/etc/hosts", true},
{"210 path=" + longName + "\nabc", "abc", "path", longName, true},
{"110 path=" + medName + "\n", "", "path", medName, true},
{"9 foo=ba\n", "", "foo", "ba", true},
{"11 foo=bar\n\x00", "\x00", "foo", "bar", true},
{"18 foo=b=\nar=\n==\x00\n", "", "foo", "b=\nar=\n==\x00", true},
{"27 foo=hello9 foo=ba\nworld\n", "", "foo", "hello9 foo=ba\nworld", true},
{"27 ☺☻☹=日a本b語ç\nmeow mix", "meow mix", "☺☻☹", "日a本b語ç", true},
{"17 \x00hello=\x00world\n", "", "\x00hello", "\x00world", true},
{"1 k=1\n", "1 k=1\n", "", "", false},
{"6 k~1\n", "6 k~1\n", "", "", false},
{"6_k=1\n", "6_k=1\n", "", "", false},
{"6 k=1 ", "6 k=1 ", "", "", false},
{"632 k=1\n", "632 k=1\n", "", "", false},
{"16 longkeyname=hahaha\n", "16 longkeyname=hahaha\n", "", "", false},
{"3 somelongkey=\n", "3 somelongkey=\n", "", "", false},
{"50 tooshort=\n", "50 tooshort=\n", "", "", false},
}
for _, v := range vectors {
key, val, res, err := parsePAXRecord(v.in)
ok := (err == nil)
if ok != v.ok {
if v.ok {
t.Errorf("parsePAXRecord(%q): got parsing failure, want success", v.in)
} else {
t.Errorf("parsePAXRecord(%q): got parsing success, want failure", v.in)
}
}
if v.ok && (key != v.wantKey || val != v.wantVal) {
t.Errorf("parsePAXRecord(%q): got (%q: %q), want (%q: %q)",
v.in, key, val, v.wantKey, v.wantVal)
}
if res != v.wantRes {
t.Errorf("parsePAXRecord(%q): got residual %q, want residual %q",
v.in, res, v.wantRes)
}
}
}
func TestFormatPAXRecord(t *testing.T) {
medName := strings.Repeat("CD", 50)
longName := strings.Repeat("AB", 100)
vectors := []struct {
inKey string
inVal string
want string
}{
{"k", "v", "6 k=v\n"},
{"path", "/etc/hosts", "19 path=/etc/hosts\n"},
{"path", longName, "210 path=" + longName + "\n"},
{"path", medName, "110 path=" + medName + "\n"},
{"foo", "ba", "9 foo=ba\n"},
{"foo", "bar", "11 foo=bar\n"},
{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
}
for _, v := range vectors {
got := formatPAXRecord(v.inKey, v.inVal)
if got != v.want {
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
v.inKey, v.inVal, got, v.want)
}
}
}


@@ -1,313 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
"testing"
"time"
)
func TestFileInfoHeader(t *testing.T) {
fi, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
h, err := FileInfoHeader(fi, "")
if err != nil {
t.Fatalf("FileInfoHeader: %v", err)
}
if g, e := h.Name, "small.txt"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
t.Errorf("Mode = %#o; want %#o", g, e)
}
if g, e := h.Size, int64(5); g != e {
t.Errorf("Size = %v; want %v", g, e)
}
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
t.Errorf("ModTime = %v; want %v", g, e)
}
// FileInfoHeader should error when passing nil FileInfo
if _, err := FileInfoHeader(nil, ""); err == nil {
t.Fatalf("Expected error when passing nil to FileInfoHeader")
}
}
func TestFileInfoHeaderDir(t *testing.T) {
fi, err := os.Stat("testdata")
if err != nil {
t.Fatal(err)
}
h, err := FileInfoHeader(fi, "")
if err != nil {
t.Fatalf("FileInfoHeader: %v", err)
}
if g, e := h.Name, "testdata/"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
// Ignoring c_ISGID for golang.org/issue/4867
if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
t.Errorf("Mode = %#o; want %#o", g, e)
}
if g, e := h.Size, int64(0); g != e {
t.Errorf("Size = %v; want %v", g, e)
}
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
t.Errorf("ModTime = %v; want %v", g, e)
}
}
func TestFileInfoHeaderSymlink(t *testing.T) {
h, err := FileInfoHeader(symlink{}, "some-target")
if err != nil {
t.Fatal(err)
}
if g, e := h.Name, "some-symlink"; g != e {
t.Errorf("Name = %q; want %q", g, e)
}
if g, e := h.Linkname, "some-target"; g != e {
t.Errorf("Linkname = %q; want %q", g, e)
}
}
type symlink struct{}
func (symlink) Name() string { return "some-symlink" }
func (symlink) Size() int64 { return 0 }
func (symlink) Mode() os.FileMode { return os.ModeSymlink }
func (symlink) ModTime() time.Time { return time.Time{} }
func (symlink) IsDir() bool { return false }
func (symlink) Sys() interface{} { return nil }
func TestRoundTrip(t *testing.T) {
data := []byte("some file contents")
var b bytes.Buffer
tw := NewWriter(&b)
hdr := &Header{
Name: "file.txt",
Uid: 1 << 21, // too big for 8 octal digits
Size: int64(len(data)),
ModTime: time.Now(),
}
// tar only supports second precision.
hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("tw.WriteHeader: %v", err)
}
if _, err := tw.Write(data); err != nil {
t.Fatalf("tw.Write: %v", err)
}
if err := tw.Close(); err != nil {
t.Fatalf("tw.Close: %v", err)
}
// Read it back.
tr := NewReader(&b)
rHdr, err := tr.Next()
if err != nil {
t.Fatalf("tr.Next: %v", err)
}
if !reflect.DeepEqual(rHdr, hdr) {
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
}
rData, err := ioutil.ReadAll(tr)
if err != nil {
t.Fatalf("Read: %v", err)
}
if !bytes.Equal(rData, data) {
t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
}
}
type headerRoundTripTest struct {
h *Header
fm os.FileMode
}
func TestHeaderRoundTrip(t *testing.T) {
vectors := []headerRoundTripTest{{
// regular file.
h: &Header{
Name: "test.txt",
Mode: 0644 | c_ISREG,
Size: 12,
ModTime: time.Unix(1360600916, 0),
Typeflag: TypeReg,
},
fm: 0644,
}, {
// symbolic link.
h: &Header{
Name: "link.txt",
Mode: 0777 | c_ISLNK,
Size: 0,
ModTime: time.Unix(1360600852, 0),
Typeflag: TypeSymlink,
},
fm: 0777 | os.ModeSymlink,
}, {
// character device node.
h: &Header{
Name: "dev/null",
Mode: 0666 | c_ISCHR,
Size: 0,
ModTime: time.Unix(1360578951, 0),
Typeflag: TypeChar,
},
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
}, {
// block device node.
h: &Header{
Name: "dev/sda",
Mode: 0660 | c_ISBLK,
Size: 0,
ModTime: time.Unix(1360578954, 0),
Typeflag: TypeBlock,
},
fm: 0660 | os.ModeDevice,
}, {
// directory.
h: &Header{
Name: "dir/",
Mode: 0755 | c_ISDIR,
Size: 0,
ModTime: time.Unix(1360601116, 0),
Typeflag: TypeDir,
},
fm: 0755 | os.ModeDir,
}, {
// fifo node.
h: &Header{
Name: "dev/initctl",
Mode: 0600 | c_ISFIFO,
Size: 0,
ModTime: time.Unix(1360578949, 0),
Typeflag: TypeFifo,
},
fm: 0600 | os.ModeNamedPipe,
}, {
// setuid.
h: &Header{
Name: "bin/su",
Mode: 0755 | c_ISREG | c_ISUID,
Size: 23232,
ModTime: time.Unix(1355405093, 0),
Typeflag: TypeReg,
},
fm: 0755 | os.ModeSetuid,
}, {
// setguid.
h: &Header{
Name: "group.txt",
Mode: 0750 | c_ISREG | c_ISGID,
Size: 0,
ModTime: time.Unix(1360602346, 0),
Typeflag: TypeReg,
},
fm: 0750 | os.ModeSetgid,
}, {
// sticky.
h: &Header{
Name: "sticky.txt",
Mode: 0600 | c_ISREG | c_ISVTX,
Size: 7,
ModTime: time.Unix(1360602540, 0),
Typeflag: TypeReg,
},
fm: 0600 | os.ModeSticky,
}, {
// hard link.
h: &Header{
Name: "hard.txt",
Mode: 0644 | c_ISREG,
Size: 0,
Linkname: "file.txt",
ModTime: time.Unix(1360600916, 0),
Typeflag: TypeLink,
},
fm: 0644,
}, {
// More information.
h: &Header{
Name: "info.txt",
Mode: 0600 | c_ISREG,
Size: 0,
Uid: 1000,
Gid: 1000,
ModTime: time.Unix(1360602540, 0),
Uname: "slartibartfast",
Gname: "users",
Typeflag: TypeReg,
},
fm: 0600,
}}
for i, v := range vectors {
fi := v.h.FileInfo()
h2, err := FileInfoHeader(fi, "")
if err != nil {
t.Error(err)
continue
}
if strings.Contains(fi.Name(), "/") {
t.Errorf("FileInfo of %q contains slash: %q", v.h.Name, fi.Name())
}
name := path.Base(v.h.Name)
if fi.IsDir() {
name += "/"
}
if got, want := h2.Name, name; got != want {
t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
}
if got, want := h2.Size, v.h.Size; got != want {
t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
}
if got, want := h2.Uid, v.h.Uid; got != want {
t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
}
if got, want := h2.Gid, v.h.Gid; got != want {
t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
}
if got, want := h2.Uname, v.h.Uname; got != want {
t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
}
if got, want := h2.Gname, v.h.Gname; got != want {
t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
}
if got, want := h2.Linkname, v.h.Linkname; got != want {
t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
}
if got, want := h2.Typeflag, v.h.Typeflag; got != want {
t.Logf("%#v %#v", v.h, fi.Sys())
t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
}
if got, want := h2.Mode, v.h.Mode; got != want {
t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
}
if got, want := fi.Mode(), v.fm; got != want {
t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
}
if got, want := h2.AccessTime, v.h.AccessTime; got != want {
t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
}
if got, want := h2.ChangeTime, v.h.ChangeTime; got != want {
t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
}
if got, want := h2.ModTime, v.h.ModTime; got != want {
t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
}
if sysh, ok := fi.Sys().(*Header); !ok || sysh != v.h {
t.Errorf("i=%d: Sys didn't return original *Header", i)
}
}
}

Binary files not shown (14 files)


@@ -1 +0,0 @@
Kilts


@@ -1 +0,0 @@
Google.com

Binary files not shown (10 files)


@@ -1,647 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"sort"
"strings"
"testing"
"testing/iotest"
"time"
)
// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
func bytestr(offset int, b []byte) string {
const rowLen = 32
s := fmt.Sprintf("%04x ", offset)
for _, ch := range b {
switch {
case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
s += fmt.Sprintf(" %c", ch)
default:
s += fmt.Sprintf(" %02x", ch)
}
}
return s
}
// Render a pseudo-diff between two blocks of bytes.
func bytediff(a []byte, b []byte) string {
const rowLen = 32
s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
for offset := 0; len(a)+len(b) > 0; offset += rowLen {
na, nb := rowLen, rowLen
if na > len(a) {
na = len(a)
}
if nb > len(b) {
nb = len(b)
}
sa := bytestr(offset, a[0:na])
sb := bytestr(offset, b[0:nb])
if sa != sb {
s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
}
a = a[na:]
b = b[nb:]
}
return s
}
func TestWriter(t *testing.T) {
type entry struct {
header *Header
contents string
}
vectors := []struct {
file string // filename of expected output
entries []*entry
}{{
// The writer test file was produced with this command:
// tar (GNU tar) 1.26
// ln -s small.txt link.txt
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
file: "testdata/writer.tar",
entries: []*entry{{
header: &Header{
Name: "small.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 5,
ModTime: time.Unix(1246508266, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Kilts",
}, {
header: &Header{
Name: "small2.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 11,
ModTime: time.Unix(1245217492, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Google.com\n",
}, {
header: &Header{
Name: "link.txt",
Mode: 0777,
Uid: 1000,
Gid: 1000,
Size: 0,
ModTime: time.Unix(1314603082, 0),
Typeflag: '2',
Linkname: "small.txt",
Uname: "strings",
Gname: "strings",
},
// no contents
}},
}, {
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
file: "testdata/writer-big.tar",
entries: []*entry{{
header: &Header{
Name: "tmp/16gig.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 16 << 30,
ModTime: time.Unix(1254699560, 0),
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
}},
}, {
// This truncated file was produced using this library.
// It was verified to work with GNU tar 1.27.1 and BSD tar 3.1.2.
// dd if=/dev/zero bs=1G count=16 >> writer-big-long.tar
// gnutar -xvf writer-big-long.tar
// bsdtar -xvf writer-big-long.tar
//
// This file is in PAX format.
file: "testdata/writer-big-long.tar",
entries: []*entry{{
header: &Header{
Name: strings.Repeat("longname/", 15) + "16gig.txt",
Mode: 0644,
Uid: 1000,
Gid: 1000,
Size: 16 << 30,
ModTime: time.Unix(1399583047, 0),
Typeflag: '0',
Uname: "guillaume",
Gname: "guillaume",
},
// fake contents
contents: strings.Repeat("\x00", 4<<10),
}},
}, {
// TODO(dsnet): The Writer output should match the following file.
// To fix an issue (see https://golang.org/issue/12594), we disabled
// prefix support, which alters the generated output.
/*
// This file was produced using gnu tar 1.17
// gnutar -b 4 --format=ustar (longname/)*15 + file.txt
file: "testdata/ustar.tar"
*/
file: "testdata/ustar.issue12594.tar", // This is a valid tar file, but not expected
entries: []*entry{{
header: &Header{
Name: strings.Repeat("longname/", 15) + "file.txt",
Mode: 0644,
Uid: 0765,
Gid: 024,
Size: 06,
ModTime: time.Unix(1360135598, 0),
Typeflag: '0',
Uname: "shane",
Gname: "staff",
},
contents: "hello\n",
}},
}, {
// This file was produced using gnu tar 1.26
// echo "Slartibartfast" > file.txt
// ln file.txt hard.txt
// tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
file: "testdata/hardlink.tar",
entries: []*entry{{
header: &Header{
Name: "file.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 15,
ModTime: time.Unix(1425484303, 0),
Typeflag: '0',
Uname: "vbatts",
Gname: "users",
},
contents: "Slartibartfast\n",
}, {
header: &Header{
Name: "hard.txt",
Mode: 0644,
Uid: 1000,
Gid: 100,
Size: 0,
ModTime: time.Unix(1425484303, 0),
Typeflag: '1',
Linkname: "file.txt",
Uname: "vbatts",
Gname: "users",
},
// no contents
}},
}}
testLoop:
for i, v := range vectors {
expected, err := ioutil.ReadFile(v.file)
if err != nil {
t.Errorf("test %d: Unexpected error: %v", i, err)
continue
}
buf := new(bytes.Buffer)
tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
big := false
for j, entry := range v.entries {
big = big || entry.header.Size > 1<<10
if err := tw.WriteHeader(entry.header); err != nil {
t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
continue testLoop
}
if _, err := io.WriteString(tw, entry.contents); err != nil {
t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
continue testLoop
}
}
// Only interested in Close failures for the small tests.
if err := tw.Close(); err != nil && !big {
t.Errorf("test %d: Failed closing archive: %v", i, err)
continue testLoop
}
actual := buf.Bytes()
if !bytes.Equal(expected, actual) {
t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
i, bytediff(expected, actual))
}
if testing.Short() { // The second test is expensive.
break
}
}
}
func TestPax(t *testing.T) {
// Create an archive with a large name
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
// Force a PAX long name to be written
longName := strings.Repeat("ab", 100)
contents := strings.Repeat(" ", int(hdr.Size))
hdr.Name = longName
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != longName {
t.Fatal("Couldn't recover long file name")
}
}
func TestPaxSymlink(t *testing.T) {
// Create an archive with a large linkname
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeSymlink
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// Force a PAX long linkname to be written
longLinkname := strings.Repeat("1234567890/1234567890", 10)
hdr.Linkname = longLinkname
hdr.Size = 0
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Linkname != longLinkname {
t.Fatal("Couldn't recover long link name")
}
}
func TestPaxNonAscii(t *testing.T) {
// Create an archive with non ascii. These should trigger a pax header
// because pax headers have a defined utf-8 encoding.
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// some sample data
chineseFilename := "文件名"
chineseGroupname := "組"
chineseUsername := "用戶名"
hdr.Name = chineseFilename
hdr.Gname = chineseGroupname
hdr.Uname = chineseUsername
contents := strings.Repeat(" ", int(hdr.Size))
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != chineseFilename {
t.Fatal("Couldn't recover unicode name")
}
if hdr.Gname != chineseGroupname {
t.Fatal("Couldn't recover unicode group")
}
if hdr.Uname != chineseUsername {
t.Fatal("Couldn't recover unicode user")
}
}
func TestPaxXattrs(t *testing.T) {
xattrs := map[string]string{
"user.key": "value",
}
// Create an archive with an xattr
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
contents := "Kilts"
hdr.Xattrs = xattrs
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Test that we can get the xattrs back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
hdr.Xattrs, xattrs)
}
}
func TestPaxHeadersSorted(t *testing.T) {
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
contents := strings.Repeat(" ", int(hdr.Size))
hdr.Xattrs = map[string]string{
"foo": "foo",
"bar": "bar",
"baz": "baz",
"qux": "qux",
}
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// xattr bar should always appear before others
indices := []int{
bytes.Index(buf.Bytes(), []byte("bar=bar")),
bytes.Index(buf.Bytes(), []byte("baz=baz")),
bytes.Index(buf.Bytes(), []byte("foo=foo")),
bytes.Index(buf.Bytes(), []byte("qux=qux")),
}
if !sort.IntsAreSorted(indices) {
t.Fatal("PAX headers are not sorted")
}
}
func TestUSTARLongName(t *testing.T) {
// Create an archive with a path that failed to split with USTAR extension in previous versions.
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
hdr.Typeflag = TypeDir
if err != nil {
t.Fatalf("os.Stat:1 %v", err)
}
// Force a PAX long name to be written. The name was taken from a practical example
// that fails and replaced ever char through numbers to anonymize the sample.
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
hdr.Name = longName
hdr.Size = 0
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Test that we can get a long name back out of the archive.
reader := NewReader(&buf)
hdr, err = reader.Next()
if err != nil {
t.Fatal(err)
}
if hdr.Name != longName {
t.Fatal("Couldn't recover long name")
}
}
func TestValidTypeflagWithPAXHeader(t *testing.T) {
var buffer bytes.Buffer
tw := NewWriter(&buffer)
fileName := strings.Repeat("ab", 100)
hdr := &Header{
Name: fileName,
Size: 4,
Typeflag: 0,
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("Failed to write header: %s", err)
}
if _, err := tw.Write([]byte("fooo")); err != nil {
t.Fatalf("Failed to write the file's data: %s", err)
}
tw.Close()
tr := NewReader(&buffer)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("Failed to read header: %s", err)
}
if header.Typeflag != 0 {
t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
}
}
}
func TestWriteAfterClose(t *testing.T) {
var buffer bytes.Buffer
tw := NewWriter(&buffer)
hdr := &Header{
Name: "small.txt",
Size: 5,
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatalf("Failed to write header: %s", err)
}
tw.Close()
if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
}
}
func TestSplitUSTARPath(t *testing.T) {
sr := strings.Repeat
vectors := []struct {
input string // Input path
prefix string // Expected output prefix
suffix string // Expected output suffix
ok bool // Split success?
}{
{"", "", "", false},
{"abc", "", "", false},
{"用戶名", "", "", false},
{sr("a", nameSize), "", "", false},
{sr("a", nameSize) + "/", "", "", false},
{sr("a", nameSize) + "/a", sr("a", nameSize), "a", true},
{sr("a", prefixSize) + "/", "", "", false},
{sr("a", prefixSize) + "/a", sr("a", prefixSize), "a", true},
{sr("a", nameSize+1), "", "", false},
{sr("/", nameSize+1), sr("/", nameSize-1), "/", true},
{sr("a", prefixSize) + "/" + sr("b", nameSize),
sr("a", prefixSize), sr("b", nameSize), true},
{sr("a", prefixSize) + "//" + sr("b", nameSize), "", "", false},
{sr("a/", nameSize), sr("a/", 77) + "a", sr("a/", 22), true},
}
for _, v := range vectors {
prefix, suffix, ok := splitUSTARPath(v.input)
if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
}
}
}
// TestIssue12594 tests that the Writer does not attempt to populate the prefix
// field when encoding a header in the GNU format. The prefix field is valid
// in USTAR and PAX, but not GNU.
func TestIssue12594(t *testing.T) {
names := []string{
"0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/file.txt",
"0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/file.txt",
"0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/333/file.txt",
"0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/34/35/36/37/38/39/40/file.txt",
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000/file.txt",
"/home/support/.openoffice.org/3/user/uno_packages/cache/registry/com.sun.star.comp.deployment.executable.PackageRegistryBackend",
}
for i, name := range names {
var b bytes.Buffer
tw := NewWriter(&b)
if err := tw.WriteHeader(&Header{
Name: name,
Uid: 1 << 25, // Prevent USTAR format
}); err != nil {
t.Errorf("test %d, unexpected WriteHeader error: %v", i, err)
}
if err := tw.Close(); err != nil {
t.Errorf("test %d, unexpected Close error: %v", i, err)
}
// The prefix field should never appear in the GNU format.
var blk block
copy(blk[:], b.Bytes())
prefix := string(blk.USTAR().Prefix())
if i := strings.IndexByte(prefix, 0); i >= 0 {
prefix = prefix[:i] // Truncate at the NUL terminator
}
if blk.GetFormat() == formatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) {
t.Errorf("test %d, found prefix in GNU format: %s", i, prefix)
}
tr := NewReader(&b)
hdr, err := tr.Next()
if err != nil {
t.Errorf("test %d, unexpected Next error: %v", i, err)
}
if hdr.Name != name {
t.Errorf("test %d, hdr.Name = %s, want %s", i, hdr.Name, name)
}
}
}


@@ -341,6 +341,7 @@ func (c *controller) clusterAgentInit() {
// should still be present when cleaning up
// service bindings
c.agentClose()
+c.cleanupServiceDiscovery("")
c.cleanupServiceBindings("")
c.agentStopComplete()


@@ -646,7 +646,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
if err = d.storeUpdate(endpoint); err != nil {
-return fmt.Errorf("failed to save endpoint %s to store: %v", endpoint.id[0:7], err)
+logrus.Errorf("Failed to save endpoint %s to store: %v", endpoint.id[0:7], err)
}
return nil


@@ -995,6 +995,10 @@ func (n *network) delete(force bool) error {
logrus.Errorf("Failed leaving network %s from the agent cluster: %v", n.Name(), err)
}
+// Cleanup the service discovery for this network
+c.cleanupServiceDiscovery(n.ID())
+// Cleanup the load balancer
c.cleanupServiceBindings(n.ID())
removeFromStore:


@@ -499,7 +499,10 @@ func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
// without doing a delete of all the objects
entry.ltime++
}
-nDB.createOrUpdateEntry(nid, tname, key, entry)
+if !oldEntry.deleting {
+nDB.createOrUpdateEntry(nid, tname, key, entry)
+}
} else {
// the local node is leaving the network, all the entries of remote nodes can be safely removed
nDB.deleteEntry(nid, tname, key)
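
The guard added above keeps deleteNodeNetworkEntries from re-creating a table entry whose previous copy is already marked as deleting when its owner node leaves; without it, an entry mid-way through its deletion cycle could be written back with a bumped logical time. Below is a stripped-down sketch of the same pattern, using simplified stand-in types rather than NetworkDB's real structures:

```go
package main

import "fmt"

// entry is a simplified stand-in for a NetworkDB table entry.
type entry struct {
	value    string
	ltime    uint64 // simplified logical (Lamport) time
	deleting bool   // set once the entry has been tombstoned
}

type store struct {
	entries map[string]*entry
}

// takeOwnership re-registers an entry left behind by a departed node,
// but never resurrects one that is already being deleted.
func (s *store) takeOwnership(key string) {
	old, ok := s.entries[key]
	if !ok {
		return
	}
	updated := &entry{value: old.value, ltime: old.ltime + 1}
	if !old.deleting { // same guard as in the hunk above
		s.entries[key] = updated
	}
}

func main() {
	s := &store{entries: map[string]*entry{
		"svc-a": {value: "10.0.0.2", ltime: 4},
		"svc-b": {value: "10.0.0.3", ltime: 7, deleting: true},
	}}
	s.takeOwnership("svc-a") // bumped and kept
	s.takeOwnership("svc-b") // left alone; its deletion proceeds
	for k, e := range s.entries {
		fmt.Printf("%s: ltime=%d deleting=%v\n", k, e.ltime, e.deleting)
	}
}
```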


@@ -161,6 +161,19 @@ func (c *controller) getLBIndex(sid, nid string, ingressPorts []*PortConfig) int
return int(lb.fwMark)
}
+// cleanupServiceDiscovery when the network is being deleted, erase all the associated service discovery records
+func (c *controller) cleanupServiceDiscovery(cleanupNID string) {
+c.Lock()
+defer c.Unlock()
+if cleanupNID == "" {
+logrus.Debugf("cleanupServiceDiscovery for all networks")
+c.svcRecords = make(map[string]svcInfo)
+return
+}
+logrus.Debugf("cleanupServiceDiscovery for network:%s", cleanupNID)
+delete(c.svcRecords, cleanupNID)
+}
func (c *controller) cleanupServiceBindings(cleanupNID string) {
var cleanupFuncs []func()
@@ -184,15 +197,6 @@ func (c *controller) cleanupServiceBindings(cleanupNID string) {
continue
}
-// The network is being deleted, erase all the associated service discovery records
-// TODO(fcrisciani) separate the Load Balancer from the Service discovery, this operation
-// can be done safely here, but the rmServiceBinding is still keeping consistency in the
-// data structures that are tracking the endpoint to IP mapping.
-c.Lock()
-logrus.Debugf("cleanupServiceBindings erasing the svcRecords for %s", nid)
-delete(c.svcRecords, nid)
-c.Unlock()
for eid, ip := range lb.backEnds {
epID := eid
epIP := ip
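
The block removed above is the inline svcRecords erase that the TODO asked to split out of the load-balancer path; with this revendor that job moves to the new cleanupServiceDiscovery, and cleanupServiceBindings is left to tear down only load-balancer state. The call sites earlier in this diff run the discovery cleanup for a single network on network delete and with an empty ID (meaning all networks) on agent shutdown. Below is a rough, self-contained sketch of the resulting split, with simplified types and fields standing in for libnetwork's real ones:

```go
package main

import "fmt"

// controllerSketch is a simplified stand-in for libnetwork's controller:
// svcRecords holds per-network service-discovery records, lbState holds
// per-network load-balancer state.
type controllerSketch struct {
	svcRecords map[string]map[string]string // nid -> service name -> IP
	lbState    map[string][]string          // nid -> backend endpoints
}

// cleanupServiceDiscovery mirrors the shape of the function added above:
// an empty nid clears every network, otherwise only the given one.
func (c *controllerSketch) cleanupServiceDiscovery(nid string) {
	if nid == "" {
		c.svcRecords = make(map[string]map[string]string)
		return
	}
	delete(c.svcRecords, nid)
}

// cleanupServiceBindings now deals only with load-balancer state.
func (c *controllerSketch) cleanupServiceBindings(nid string) {
	delete(c.lbState, nid)
}

func main() {
	c := &controllerSketch{
		svcRecords: map[string]map[string]string{"net1": {"web": "10.0.0.2"}},
		lbState:    map[string][]string{"net1": {"ep1", "ep2"}},
	}
	// Network delete path: discovery records first, then load-balancer state.
	c.cleanupServiceDiscovery("net1")
	c.cleanupServiceBindings("net1")
	fmt.Println(len(c.svcRecords), len(c.lbState)) // 0 0
}
```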