From d5151ca8ab5d90300839c8572cb96577de4d6233 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Wed, 3 Sep 2014 16:26:19 +0200 Subject: [PATCH 1/9] Implement Docker on ZFS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arthur Gautier Signed-off-by: Jörg Thalheim --- Dockerfile | 5 + contrib/check-config.sh | 21 + daemon/daemon_zfs.go | 7 + daemon/graphdriver/driver.go | 1 + daemon/graphdriver/zfs/MAINTAINERS | 1 + daemon/graphdriver/zfs/zfs.go | 375 +++++++++++++++++ daemon/graphdriver/zfs/zfs_test.go | 28 ++ docs/sources/reference/commandline/cli.md | 8 +- hack/vendor.sh | 10 + project/PACKAGERS.md | 1 + .../github.com/mistifyio/go-zfs/.gitignore | 1 + .../mistifyio/go-zfs/CONTRIBUTING.md | 51 +++ .../src/github.com/mistifyio/go-zfs/LICENSE | 201 +++++++++ .../src/github.com/mistifyio/go-zfs/README.md | 54 +++ .../src/github.com/mistifyio/go-zfs/error.go | 18 + .../github.com/mistifyio/go-zfs/error_test.go | 37 ++ .../src/github.com/mistifyio/go-zfs/utils.go | 320 +++++++++++++++ vendor/src/github.com/mistifyio/go-zfs/zfs.go | 382 ++++++++++++++++++ .../github.com/mistifyio/go-zfs/zfs_test.go | 357 ++++++++++++++++ .../src/github.com/mistifyio/go-zfs/zpool.go | 108 +++++ 20 files changed, 1985 insertions(+), 1 deletion(-) create mode 100644 daemon/daemon_zfs.go create mode 100644 daemon/graphdriver/zfs/MAINTAINERS create mode 100644 daemon/graphdriver/zfs/zfs.go create mode 100644 daemon/graphdriver/zfs/zfs_test.go create mode 100644 vendor/src/github.com/mistifyio/go-zfs/.gitignore create mode 100644 vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md create mode 100644 vendor/src/github.com/mistifyio/go-zfs/LICENSE create mode 100644 vendor/src/github.com/mistifyio/go-zfs/README.md create mode 100644 vendor/src/github.com/mistifyio/go-zfs/error.go create mode 100644 vendor/src/github.com/mistifyio/go-zfs/error_test.go create mode 100644 vendor/src/github.com/mistifyio/go-zfs/utils.go create mode 100644 vendor/src/github.com/mistifyio/go-zfs/zfs.go create mode 100644 vendor/src/github.com/mistifyio/go-zfs/zfs_test.go create mode 100644 vendor/src/github.com/mistifyio/go-zfs/zpool.go diff --git a/Dockerfile b/Dockerfile index 2b49fc1e0b..9b6a79b025 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,6 +26,9 @@ FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) +RUN apt-key adv --keyserver pool.sks-keyservers.net --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 +RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list + # Packaged dependencies RUN apt-get update && apt-get install -y \ apparmor \ @@ -50,6 +53,8 @@ RUN apt-get update && apt-get install -y \ ruby1.9.1 \ ruby1.9.1-dev \ s3cmd=1.1.0* \ + ubuntu-zfs \ + libzfs-dev \ --no-install-recommends # Get lvm2 source for compiling statically diff --git a/contrib/check-config.sh b/contrib/check-config.sh index 8c55de590e..dd84497b08 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -83,6 +83,22 @@ check_flags() { done } +check_command() { + if command -v "$1" >/dev/null 2>&1; then + wrap_good "$1 command" 'available' + else + wrap_bad "$1 command" 'missing' + fi +} + +check_device() { + if [ -c "$1" ]; then + wrap_good "$1" 'present' + else + wrap_bad "$1" 'missing' + fi +} + if [ ! -e "$CONFIG" ]; then wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..." 
for tryConfig in "${possibleConfigs[@]}"; do @@ -182,6 +198,11 @@ echo '- Storage Drivers:' echo '- "'$(wrap_color 'overlay' blue)'":' check_flags OVERLAY_FS EXT4_FS_SECURITY EXT4_FS_POSIX_ACL | sed 's/^/ /' + + echo '- "'$(wrap_color 'zfs' blue)'":' + echo " - $(check_device /dev/zfs)" + echo " - $(check_command zfs)" + echo " - $(check_command zpool)" } | sed 's/^/ /' echo diff --git a/daemon/daemon_zfs.go b/daemon/daemon_zfs.go new file mode 100644 index 0000000000..372e604183 --- /dev/null +++ b/daemon/daemon_zfs.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_zfs + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/zfs" +) diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index c57dd87136..399a0503c9 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -40,6 +40,7 @@ var ( priority = []string{ "aufs", "btrfs", + "zfs", "devicemapper", "overlay", "vfs", diff --git a/daemon/graphdriver/zfs/MAINTAINERS b/daemon/graphdriver/zfs/MAINTAINERS new file mode 100644 index 0000000000..88c4bdfaf7 --- /dev/null +++ b/daemon/graphdriver/zfs/MAINTAINERS @@ -0,0 +1 @@ +Arthur Gautier (@baloose) diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go new file mode 100644 index 0000000000..8873c4736f --- /dev/null +++ b/daemon/graphdriver/zfs/zfs.go @@ -0,0 +1,375 @@ +package zfs + +/* +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "syscall" + "time" + "unsafe" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/parsers" + zfs "github.com/mistifyio/go-zfs" +) + +type ZfsOptions struct { + fsName string + mountPath string +} + +func init() { + graphdriver.Register("zfs", Init) +} + +type Logger struct{} + +func (*Logger) Log(cmd []string) { + log.Debugf("[zfs] %s", strings.Join(cmd, " ")) +} + +func Init(base string, opt []string) (graphdriver.Driver, error) { + var err error + options, err := parseOptions(opt) + options.mountPath = base + if err != nil { + return nil, err + } + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if _, err := exec.LookPath("zfs"); err != nil { + return nil, fmt.Errorf("zfs command is not available") + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) + defer file.Close() + if err != nil { + return nil, fmt.Errorf("Failed to initialize: %v", err) + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + logger := Logger{} + zfs.SetLogger(&logger) + + dataset, err := zfs.GetDataset(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot open %s", options.fsName) + } + + return &Driver{ + dataset: dataset, + options: options, + }, nil +} + +func parseOptions(opt []string) (ZfsOptions, error) { + var options ZfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + default: + return options, fmt.Errorf("Unknown option %s\n", key) + } + } + return options, nil +} + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil 
{ + fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { + log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + return nil +} + +var CprocMounts = C.CString("/proc/mounts") +var CopenMod = C.CString("r") + +func lookupZfsDataset(rootdir string) (string, error) { + var stat syscall.Stat_t + var Cmnt C.struct_mntent + var Cfp *C.FILE + buf := string(make([]byte, 256, 256)) + Cbuf := C.CString(buf) + defer free(Cbuf) + + if err := syscall.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + if Cfp = C.setmntent(CprocMounts, CopenMod); Cfp == nil { + return "", fmt.Errorf("Failed to open /proc/mounts") + } + defer C.endmntent(Cfp) + + for C.getmntent_r(Cfp, &Cmnt, Cbuf, 256) != nil { + dir := C.GoString(Cmnt.mnt_dir) + if err := syscall.Stat(dir, &stat); err != nil { + log.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", dir, err) + continue // may fail on fuse file systems + } + + fs := C.GoString(Cmnt.mnt_type) + if stat.Dev == wantedDev && fs == "zfs" { + return C.GoString(Cmnt.mnt_fsname), nil + } + } + // should never happen + return "", fmt.Errorf("Failed to find zfs pool in /proc/mounts") +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +type Driver struct { + dataset *zfs.Dataset + options ZfsOptions +} + +func (d *Driver) String() string { + return "zfs" +} + +func (d *Driver) Cleanup() error { + return nil +} + +func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + if err != nil { + return [][2]string{ + {"error while getting pool", fmt.Sprintf("%v", err)}, + } + } + var quota string + if d.dataset.Quota == 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } else { + quota = "no" + } + + return [][2]string{ + {"Zpool", pool.Name}, + {"Zpool Health", pool.Health}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +func cloneFilesystem(id, parent, mountpoint string) error { + parentDataset, err := zfs.GetDataset(parent) + if parentDataset == nil { + return err + } + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) + if snapshot == nil { + return err + } + + _, err = snapshot.Clone(id, map[string]string{ + "mountpoint": mountpoint, + }) + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + err = snapshot.Destroy(zfs.DestroyDeferDeletion) + return err +} + +func (d *Driver) ZfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) Create(id string, parent string) error { + mountPoint := path.Join(d.options.mountPath, "graph", id) + datasetName := d.ZfsPath(id) + dataset, err := zfs.GetDataset(datasetName) + if err == nil { + // cleanup existing dataset from an aborted build + dataset.Destroy(zfs.DestroyRecursiveClones) + } + + if parent == "" { + _, err := zfs.CreateFilesystem(datasetName, map[string]string{ + "mountpoint": mountPoint, + }) + return err + } else { + return cloneFilesystem(datasetName, d.ZfsPath(parent), mountPoint) + } + return nil +} + +func (d *Driver) Remove(id string) error { + dataset, err := 
zfs.GetDataset(d.ZfsPath(id)) + if dataset == nil { + return err + } + + return dataset.Destroy(zfs.DestroyRecursive) +} + +func (d *Driver) Get(id, mountLabel string) (string, error) { + dataset, err := zfs.GetDataset(d.ZfsPath(id)) + if dataset == nil { + return "", err + } else { + return dataset.Mountpoint, nil + } +} + +func (d *Driver) Put(id string) error { + // FS is already mounted + return nil +} + +func (d *Driver) Exists(id string) bool { + _, err := zfs.GetDataset(d.ZfsPath(id)) + return err == nil +} + +func zfsChanges(dataset *zfs.Dataset) ([]archive.Change, error) { + if dataset.Origin == "" { // should never happen + return nil, fmt.Errorf("no origin found for dataset '%s'. expected a clone", dataset.Name) + } + changes, err := dataset.Diff(dataset.Origin) + if err != nil { + return nil, err + } + + // for rename changes, we have to add a ADD and a REMOVE + renameCount := 0 + for _, change := range changes { + if change.Change == zfs.Renamed { + renameCount++ + } + } + archiveChanges := make([]archive.Change, len(changes)+renameCount) + i := 0 + for _, change := range changes { + var changeType archive.ChangeType + mountpointLen := len(dataset.Mountpoint) + basePath := change.Path[mountpointLen:] + switch change.Change { + case zfs.Renamed: + archiveChanges[i] = archive.Change{basePath, archive.ChangeDelete} + newBasePath := change.NewPath[mountpointLen:] + archiveChanges[i+1] = archive.Change{newBasePath, archive.ChangeAdd} + i += 2 + continue + case zfs.Created: + changeType = archive.ChangeAdd + case zfs.Modified: + changeType = archive.ChangeModify + case zfs.Removed: + changeType = archive.ChangeDelete + } + archiveChanges[i] = archive.Change{basePath, changeType} + i++ + } + + return archiveChanges, nil +} + +func (d *Driver) Diff(id, parent string) (archive.Archive, error) { + dataset, err := zfs.GetDataset(d.ZfsPath(id)) + if err != nil { + return nil, err + } + changes, err := zfsChanges(dataset) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(dataset.Mountpoint, changes) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + d.Put(id) + return err + }), nil +} + +func (d *Driver) DiffSize(id, parent string) (bytes int64, err error) { + dataset, err := zfs.GetDataset(d.ZfsPath(id)) + if err == nil { + return int64((*dataset).Logicalused), nil + } else { + return -1, err + } +} + +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + dataset, err := zfs.GetDataset(d.ZfsPath(id)) + if err != nil { + return nil, err + } + return zfsChanges(dataset) +} + +func (d *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (int64, error) { + dataset, err := zfs.GetDataset(d.ZfsPath(id)) + if err != nil { + return -1, err + } + _, err = archive.ApplyLayer(dataset.Mountpoint, diff) + if err != nil { + return -1, err + } + updatedDataset, err := zfs.GetDataset(d.ZfsPath(id)) + if err != nil { + return -1, err + } + return int64(updatedDataset.Logicalused), nil +} diff --git a/daemon/graphdriver/zfs/zfs_test.go b/daemon/graphdriver/zfs/zfs_test.go new file mode 100644 index 0000000000..f87aab36bb --- /dev/null +++ b/daemon/graphdriver/zfs/zfs_test.go @@ -0,0 +1,28 @@ +package zfs + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestZfsSetup and TestZfsTeardown +func TestZfsSetup(t *testing.T) { 
+ graphtest.GetDriver(t, "zfs") +} + +func TestZfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "zfs") +} + +func TestZfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "zfs") +} + +func TestZfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "zfs") +} + +func TestZfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 1efc528b09..aa0b1fd2d5 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -250,7 +250,7 @@ precedence over `HTTP_PROXY`. ### Daemon storage-driver option The Docker daemon has support for several different image layer storage drivers: `aufs`, -`devicemapper`, `btrfs` and `overlay`. +`devicemapper`, `btrfs`, `zfs` and `overlay`. The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that is unlikely to be merged into the main kernel. These are also known to cause some @@ -272,6 +272,12 @@ explains how to tune your existing setup without the use of options. The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`. +The `zfs` driver is probably not fast as `btrfs` but has a longer track record +on stability. Thanks to `Single Copy ARC` shared blocks between clones will be +cached only once. Use `docker -d -s zfs`. To select a different zfs filesystem +as backingstore use the storage option `zfs.fsname`: +`docker -d -s zfs --storage-opt zfs.fsname=zroot/docker` + The `overlay` is a very fast union filesystem. It is now merged in the main Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). Call `docker -d -s overlay` to use it. diff --git a/hack/vendor.sh b/hack/vendor.sh index d51bad575f..7ab7b0233c 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -54,6 +54,16 @@ clone git github.com/tchap/go-patricia v2.1.0 clone hg code.google.com/p/go.net 84a4013f96e0 clone hg code.google.com/p/gosqlite 74691fb6f837 +clone git github.com/docker/libtrust 230dfd18c232 + +clone git github.com/Sirupsen/logrus v0.7.2 + +clone git github.com/go-fsnotify/fsnotify v1.2.0 + +clone git github.com/go-check/check 64131543e7896d5bcc6bd5a76287eb75ea96c673 + +clone git github.com/mistifyio/go-zfs v2.1.0 + # get distribution packages clone git github.com/docker/distribution d957768537c5af40e4f4cd96871f7b2bde9e2923 mv src/github.com/docker/distribution/digest tmp-digest diff --git a/project/PACKAGERS.md b/project/PACKAGERS.md index d321a900d6..fd2156c585 100644 --- a/project/PACKAGERS.md +++ b/project/PACKAGERS.md @@ -303,6 +303,7 @@ by having support for them in the kernel or userspace. 
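As an illustration of the layering strategy described in the `cli.md` paragraph above (and implemented by `cloneFilesystem` in `daemon/graphdriver/zfs/zfs.go`), the sketch below shows the snapshot-then-clone flow using the vendored go-zfs API. This is a minimal sketch, not part of the patch: the dataset names, mountpoint, and standalone `main` wrapper are hypothetical, and it assumes a working ZFS setup plus root privileges.

```go
package main

import (
	"fmt"
	"log"
	"time"

	zfs "github.com/mistifyio/go-zfs"
)

func main() {
	// Hypothetical parent layer; with `-s zfs --storage-opt zfs.fsname=zroot/docker`
	// the driver keeps one dataset per image layer under zroot/docker.
	parent, err := zfs.GetDataset("zroot/docker/parent-layer")
	if err != nil {
		log.Fatal(err)
	}

	// Snapshot the parent, clone the snapshot as the child layer, then mark
	// the snapshot for deferred deletion so it is removed with the clone.
	snap, err := parent.Snapshot(fmt.Sprintf("%d", time.Now().Nanosecond()), false)
	if err != nil {
		log.Fatal(err)
	}
	child, err := snap.Clone("zroot/docker/child-layer", map[string]string{
		"mountpoint": "/var/lib/docker/graph/child-layer", // hypothetical mount point
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := snap.Destroy(zfs.DestroyDeferDeletion); err != nil {
		log.Fatal(err)
	}
	log.Printf("child layer mounted at %s", child.Mountpoint)
}
```

Because the clone shares blocks with its origin snapshot, this is what lets the ARC cache shared data only once across layers, as noted in the documentation change above.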
A few examples include: * AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at least the "auplink" utility from aufs-tools) * BTRFS graph driver (requires BTRFS support enabled in the kernel) +* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module) ## Daemon Init Script diff --git a/vendor/src/github.com/mistifyio/go-zfs/.gitignore b/vendor/src/github.com/mistifyio/go-zfs/.gitignore new file mode 100644 index 0000000000..8000dd9db4 --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/.gitignore @@ -0,0 +1 @@ +.vagrant diff --git a/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md new file mode 100644 index 0000000000..66aab8e359 --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md @@ -0,0 +1,51 @@ +## How to Contribute ## + +We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute. + +### Reporting issues ### + +We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and submitting pull requests. + +If you find a bug: + +* Use the GitHub issue search to check whether the bug has already been reported. +* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository. +* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug. + +### Pull requests ### + +We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request. + +[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote: + + $ git clone git@github.com:/go-zfs.git + $ cd go-zfs + $ git remote add upstream https://github.com/mistifyio/go-zfs.git + +If you need to pull new changes committed upstream: + + $ git checkout master + $ git fetch upstream + $ git merge upstream/master + +Don' work directly on master as this makes it harder to merge later. Create a feature branch for your fix or new feature: + + $ git checkout -b + +Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message. + + $ git commit -m "Issue # - " + +Push your feature branch to your fork. + + $ git push origin + +[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes. + +* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/). +* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing). + +**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE). 
+ +---- +Guidelines based on http://azkaban.github.io/contributing.html diff --git a/vendor/src/github.com/mistifyio/go-zfs/LICENSE b/vendor/src/github.com/mistifyio/go-zfs/LICENSE new file mode 100644 index 0000000000..f4c265cfec --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2014, OmniTI Computer Consulting, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/src/github.com/mistifyio/go-zfs/README.md b/vendor/src/github.com/mistifyio/go-zfs/README.md new file mode 100644 index 0000000000..2515e588e0 --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/README.md @@ -0,0 +1,54 @@ +# Go Wrapper for ZFS # + +Simple wrappers for ZFS command line tools. + +[![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs) + +## Requirements ## + +You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS: + + sudo apt-get install python-software-properties + sudo apt-add-repository ppa:zfs-native/stable + sudo apt-get update + sudo apt-get install ubuntu-zfs libzfs-dev + +Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't use Ubuntu packages for Go, use http://golang.org/doc/install + +Generally you need root privileges to use anything zfs related. + +## Status ## + +This has been only been tested on Ubuntu 14.04 + +In the future, we hope to work directly with libzfs. + +# Hacking # + +The tests have decent examples for most functions. + +```go +//assuming a zpool named test +//error handling ommitted + + +f, err := zfs.CreateFilesystem("test/snapshot-test", nil) +ok(t, err) + +s, err := f.Snapshot("test", nil) +ok(t, err) + +// snapshot is named "test/snapshot-test@test" + +c, err := s.Clone("test/clone-test", nil) + +err := c.Destroy() +err := s.Destroy() +err := f.Destroy() + +``` + +# Contributing # + +See the [contributing guidelines](./CONTRIBUTING.md) + diff --git a/vendor/src/github.com/mistifyio/go-zfs/error.go b/vendor/src/github.com/mistifyio/go-zfs/error.go new file mode 100644 index 0000000000..5408ccdb55 --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/error.go @@ -0,0 +1,18 @@ +package zfs + +import ( + "fmt" +) + +// Error is an error which is returned when the `zfs` or `zpool` shell +// commands return with a non-zero exit code. +type Error struct { + Err error + Debug string + Stderr string +} + +// Error returns the string representation of an Error. 
+func (e Error) Error() string { + return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr) +} diff --git a/vendor/src/github.com/mistifyio/go-zfs/error_test.go b/vendor/src/github.com/mistifyio/go-zfs/error_test.go new file mode 100644 index 0000000000..323980ec6d --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/error_test.go @@ -0,0 +1,37 @@ +package zfs + +import ( + "errors" + "fmt" + "testing" +) + +func TestError(t *testing.T) { + var tests = []struct { + err error + debug string + stderr string + }{ + // Empty error + {nil, "", ""}, + // Typical error + {errors.New("exit status foo"), "/sbin/foo bar qux", "command not found"}, + // Quoted error + {errors.New("exit status quoted"), "\"/sbin/foo\" bar qux", "\"some\" 'random' `quotes`"}, + } + + for _, test := range tests { + // Generate error from tests + zErr := Error{ + Err: test.err, + Debug: test.debug, + Stderr: test.stderr, + } + + // Verify output format is consistent, so that any changes to the + // Error method must be reflected by the test + if str := zErr.Error(); str != fmt.Sprintf("%s: %q => %s", test.err, test.debug, test.stderr) { + t.Fatalf("unexpected Error string: %v", str) + } + } +} diff --git a/vendor/src/github.com/mistifyio/go-zfs/utils.go b/vendor/src/github.com/mistifyio/go-zfs/utils.go new file mode 100644 index 0000000000..250bd5b31c --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/utils.go @@ -0,0 +1,320 @@ +package zfs + +import ( + "bytes" + "fmt" + "io" + "os/exec" + "regexp" + "strconv" + "strings" +) + +type command struct { + Command string + Stdin io.Reader + Stdout io.Writer +} + +func (c *command) Run(arg ...string) ([][]string, error) { + + cmd := exec.Command(c.Command, arg...) + + var stdout, stderr bytes.Buffer + + if c.Stdout == nil { + cmd.Stdout = &stdout + } else { + cmd.Stdout = c.Stdout + } + + if c.Stdin != nil { + cmd.Stdin = c.Stdin + + } + cmd.Stderr = &stderr + + debug := strings.Join([]string{cmd.Path, strings.Join(cmd.Args, " ")}, " ") + if logger != nil { + logger.Log(cmd.Args) + } + err := cmd.Run() + + if err != nil { + return nil, &Error{ + Err: err, + Debug: debug, + Stderr: stderr.String(), + } + } + + // assume if you passed in something for stdout, that you know what to do with it + if c.Stdout != nil { + return nil, nil + } + + lines := strings.Split(stdout.String(), "\n") + + //last line is always blank + lines = lines[0 : len(lines)-1] + output := make([][]string, len(lines)) + + for i, l := range lines { + output[i] = strings.Fields(l) + } + + return output, nil +} + +func setString(field *string, value string) { + v := "" + if value != "-" { + v = value + } + *field = v +} + +func setUint(field *uint64, value string) error { + var v uint64 + if value != "-" { + var err error + v, err = strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + } + *field = v + return nil +} + +func (ds *Dataset) parseLine(line []string) error { + prop := line[1] + val := line[2] + + var err error + + switch prop { + case "available": + err = setUint(&ds.Avail, val) + case "compression": + setString(&ds.Compression, val) + case "mountpoint": + setString(&ds.Mountpoint, val) + case "quota": + err = setUint(&ds.Quota, val) + case "type": + setString(&ds.Type, val) + case "origin": + setString(&ds.Origin, val) + case "used": + err = setUint(&ds.Used, val) + case "volsize": + err = setUint(&ds.Volsize, val) + case "written": + err = setUint(&ds.Written, val) + case "logicalused": + err = setUint(&ds.Logicalused, val) + } + return err +} + +/* + * from zfs 
diff`s escape function: + * + * Prints a file name out a character at a time. If the character is + * not in the range of what we consider "printable" ASCII, display it + * as an escaped 3-digit octal value. ASCII values less than a space + * are all control characters and we declare the upper end as the + * DELete character. This also is the last 7-bit ASCII character. + * We choose to treat all 8-bit ASCII as not printable for this + * application. + */ +func unescapeFilepath(path string) (string, error) { + buf := make([]byte, 0, len(path)) + llen := len(path) + for i := 0; i < llen; { + if path[i] == '\\' { + if llen < i+4 { + return "", fmt.Errorf("Invalid octal code: too short") + } + octalCode := path[(i + 1):(i + 4)] + val, err := strconv.ParseUint(octalCode, 8, 8) + if err != nil { + return "", fmt.Errorf("Invalid octal code: %v", err) + } + buf = append(buf, byte(val)) + i += 4 + } else { + buf = append(buf, path[i]) + i++ + } + } + return string(buf), nil +} + +var changeTypeMap = map[string]ChangeType{ + "-": Removed, + "+": Created, + "M": Modified, + "R": Renamed, +} +var inodeTypeMap = map[string]InodeType{ + "B": BlockDevice, + "C": CharacterDevice, + "/": Directory, + ">": Door, + "|": NamedPipe, + "@": SymbolicLink, + "P": EventPort, + "=": Socket, + "F": File, +} + +// matches (+1) or (-1) +var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)") + +func parseReferenceCount(field string) (int, error) { + matches := referenceCountRegex.FindStringSubmatch(field) + if matches == nil { + return 0, fmt.Errorf("Regexp does not match") + } + return strconv.Atoi(matches[1]) +} + +func parseInodeChange(line []string) (*InodeChange, error) { + llen := len(line) + if llen < 1 { + return nil, fmt.Errorf("Empty line passed") + } + + changeType := changeTypeMap[line[0]] + if changeType == 0 { + return nil, fmt.Errorf("Unknown change type '%s'", line[0]) + } + + switch changeType { + case Renamed: + if llen != 4 { + return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen) + } + case Modified: + if llen != 4 && llen != 3 { + return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen) + } + default: + if llen != 3 { + return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen) + } + } + + inodeType := inodeTypeMap[line[1]] + if inodeType == 0 { + return nil, fmt.Errorf("Unknown inode type '%s'", line[1]) + } + + path, err := unescapeFilepath(line[2]) + if err != nil { + return nil, fmt.Errorf("Failed to parse filename: %v", err) + } + + var newPath string + var referenceCount int + switch changeType { + case Renamed: + newPath, err = unescapeFilepath(line[3]) + if err != nil { + return nil, fmt.Errorf("Failed to parse filename: %v", err) + } + case Modified: + if llen == 4 { + referenceCount, err = parseReferenceCount(line[3]) + if err != nil { + return nil, fmt.Errorf("Failed to parse reference count: %v", err) + } + } + default: + newPath = "" + } + + return &InodeChange{ + Change: changeType, + Type: inodeType, + Path: path, + NewPath: newPath, + ReferenceCountChange: referenceCount, + }, nil +} + +// example input +//M / /testpool/bar/ +//+ F /testpool/bar/hello.txt +//M / /testpool/bar/hello.txt (+1) +//M / /testpool/bar/hello-hardlink +func parseInodeChanges(lines [][]string) ([]*InodeChange, error) { + changes := make([]*InodeChange, len(lines)) + + for i, line := range lines { + c, err := parseInodeChange(line) + if err != nil { + return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", 
i, err, line) + } + changes[i] = c + } + return changes, nil +} + +func listByType(t, filter string) ([]*Dataset, error) { + args := []string{"get", "all", "-t", t, "-rHp"} + if filter != "" { + args = append(args, filter) + } + out, err := zfs(args...) + if err != nil { + return nil, err + } + + var datasets []*Dataset + + name := "" + var ds *Dataset + for _, line := range out { + if name != line[0] { + name = line[0] + ds = &Dataset{Name: name} + datasets = append(datasets, ds) + } + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + + return datasets, nil +} + +func propsSlice(properties map[string]string) []string { + args := make([]string, 0, len(properties)*3) + for k, v := range properties { + args = append(args, "-o") + args = append(args, fmt.Sprintf("%s=%s", k, v)) + } + return args +} + +func (z *Zpool) parseLine(line []string) error { + prop := line[1] + val := line[2] + + var err error + + switch prop { + case "health": + setString(&z.Health, val) + case "allocated": + err = setUint(&z.Allocated, val) + case "size": + err = setUint(&z.Size, val) + case "free": + err = setUint(&z.Free, val) + } + return err +} diff --git a/vendor/src/github.com/mistifyio/go-zfs/zfs.go b/vendor/src/github.com/mistifyio/go-zfs/zfs.go new file mode 100644 index 0000000000..f43bea292e --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/zfs.go @@ -0,0 +1,382 @@ +// Package zfs provides wrappers around the ZFS command line tools. +package zfs + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// ZFS dataset types, which can indicate if a dataset is a filesystem, +// snapshot, or volume. +const ( + DatasetFilesystem = "filesystem" + DatasetSnapshot = "snapshot" + DatasetVolume = "volume" +) + +// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot, +// or volume. The Type struct member can be used to determine a dataset's type. +// +// The field definitions can be found in the ZFS manual: +// http://www.freebsd.org/cgi/man.cgi?zfs(8). +type Dataset struct { + Name string + Origin string + Used uint64 + Avail uint64 + Mountpoint string + Compression string + Type string + Written uint64 + Volsize uint64 + Usedbydataset uint64 + Logicalused uint64 + Quota uint64 +} + +// InodeType is the type of inode as reported by Diff +type InodeType int + +// Types of Inodes +const ( + _ = iota // 0 == unknown type + BlockDevice InodeType = iota + CharacterDevice + Directory + Door + NamedPipe + SymbolicLink + EventPort + Socket + File +) + +// ChangeType is the type of inode change as reported by Diff +type ChangeType int + +// Types of Changes +const ( + _ = iota // 0 == unknown type + Removed ChangeType = iota + Created + Modified + Renamed +) + +// DestroyFlag is the options flag passed to Destroy +type DestroyFlag int + +// Valid destroy options +const ( + DestroyDefault DestroyFlag = 1 << iota + DestroyRecursive = 1 << iota + DestroyRecursiveClones = 1 << iota + DestroyDeferDeletion = 1 << iota + DestroyForceUmount = 1 << iota +) + +// InodeChange represents a change as reported by Diff +type InodeChange struct { + Change ChangeType + Type InodeType + Path string + NewPath string + ReferenceCountChange int +} + +// Logger can be used to log commands/actions +type Logger interface { + Log(cmd []string) +} + +var logger Logger + +// SetLogger set a log handler to log all commands including arguments before +// they are executed +func SetLogger(l Logger) { + logger = l +} + +// zfs is a helper function to wrap typical calls to zfs. 
+func zfs(arg ...string) ([][]string, error) { + c := command{Command: "zfs"} + return c.Run(arg...) +} + +// Datasets returns a slice of ZFS datasets, regardless of type. +// A filter argument may be passed to select a dataset with the matching name, +// or empty string ("") may be used to select all datasets. +func Datasets(filter string) ([]*Dataset, error) { + return listByType("all", filter) +} + +// Snapshots returns a slice of ZFS snapshots. +// A filter argument may be passed to select a snapshot with the matching name, +// or empty string ("") may be used to select all snapshots. +func Snapshots(filter string) ([]*Dataset, error) { + return listByType(DatasetSnapshot, filter) +} + +// Filesystems returns a slice of ZFS filesystems. +// A filter argument may be passed to select a filesystem with the matching name, +// or empty string ("") may be used to select all filesystems. +func Filesystems(filter string) ([]*Dataset, error) { + return listByType(DatasetFilesystem, filter) +} + +// Volumes returns a slice of ZFS volumes. +// A filter argument may be passed to select a volume with the matching name, +// or empty string ("") may be used to select all volumes. +func Volumes(filter string) ([]*Dataset, error) { + return listByType(DatasetVolume, filter) +} + +// GetDataset retrieves a single ZFS dataset by name. This dataset could be +// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume. +func GetDataset(name string) (*Dataset, error) { + out, err := zfs("get", "all", "-Hp", name) + if err != nil { + return nil, err + } + + ds := &Dataset{Name: name} + for _, line := range out { + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + + return ds, nil +} + +// Clone clones a ZFS snapshot and returns a clone dataset. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) { + if d.Type != DatasetSnapshot { + return nil, errors.New("can only clone snapshots") + } + args := make([]string, 2, 4) + args[0] = "clone" + args[1] = "-p" + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, []string{d.Name, dest}...) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(dest) +} + +// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a +// new snapshot with the specified name, and streams the input data into the +// newly-created snapshot. +func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) { + c := command{Command: "zfs", Stdin: input} + _, err := c.Run("receive", name) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) SendSnapshot(output io.Writer) error { + if d.Type != DatasetSnapshot { + return errors.New("can only send snapshots") + } + + c := command{Command: "zfs", Stdout: output} + _, err := c.Run("send", d.Name) + return err +} + +// CreateVolume creates a new ZFS volume with the specified name, size, and +// properties. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). 
+func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) { + args := make([]string, 4, 5) + args[0] = "create" + args[1] = "-p" + args[2] = "-V" + args[3] = strconv.FormatUint(size, 10) + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// Destroy destroys a ZFS dataset. If the destroy bit flag is set, any +// descendents of the dataset will be recursively destroyed, including snapshots. +// If the deferred bit flag is set, the snapshot is marked for deferred +// deletion. +func (d *Dataset) Destroy(flags DestroyFlag) error { + args := make([]string, 1, 3) + args[0] = "destroy" + if flags&DestroyRecursive != 0 { + args = append(args, "-r") + } + + if flags&DestroyRecursiveClones != 0 { + args = append(args, "-R") + } + + if flags&DestroyDeferDeletion != 0 { + args = append(args, "-d") + } + + if flags&DestroyForceUmount != 0 { + args = append(args, "-f") + } + + args = append(args, d.Name) + _, err := zfs(args...) + return err +} + +// SetProperty sets a ZFS property on the receiving dataset. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func (d *Dataset) SetProperty(key, val string) error { + prop := strings.Join([]string{key, val}, "=") + _, err := zfs("set", prop, d.Name) + return err +} + +// GetProperty returns the current value of a ZFS property from the +// receiving dataset. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func (d *Dataset) GetProperty(key string) (string, error) { + out, err := zfs("get", key, d.Name) + if err != nil { + return "", err + } + + return out[0][2], nil +} + +// Snapshots returns a slice of all ZFS snapshots of a given dataset. +func (d *Dataset) Snapshots() ([]*Dataset, error) { + return Snapshots(d.Name) +} + +// CreateFilesystem creates a new ZFS filesystem with the specified name and +// properties. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) { + args := make([]string, 1, 4) + args[0] = "create" + + if properties != nil { + args = append(args, propsSlice(properties)...) + } + + args = append(args, name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// Snapshot creates a new ZFS snapshot of the receiving dataset, using the +// specified name. Optionally, the snapshot can be taken recursively, creating +// snapshots of all descendent filesystems in a single, atomic operation. +func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) { + args := make([]string, 1, 4) + args[0] = "snapshot" + if recursive { + args = append(args, "-r") + } + snapName := fmt.Sprintf("%s@%s", d.Name, name) + args = append(args, snapName) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(snapName) +} + +// Rollback rolls back the receiving ZFS dataset to a previous snapshot. +// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot +// rollback cannot be completed without this option, if more recent +// snapshots exist. +// An error will be returned if the input dataset is not of snapshot type. 
+func (d *Dataset) Rollback(destroyMoreRecent bool) error { + if d.Type != DatasetSnapshot { + return errors.New("can only rollback snapshots") + } + + args := make([]string, 1, 3) + args[0] = "rollback" + if destroyMoreRecent { + args = append(args, "-r") + } + args = append(args, d.Name) + + _, err := zfs(args...) + return err +} + +// Children returns a slice of children of the receiving ZFS dataset. +// A recursion depth may be specified, or a depth of 0 allows unlimited +// recursion. +func (d *Dataset) Children(depth uint64) ([]*Dataset, error) { + args := []string{"get", "all", "-t", "all", "-Hp"} + if depth > 0 { + args = append(args, "-d") + args = append(args, strconv.FormatUint(depth, 10)) + } else { + args = append(args, "-r") + } + args = append(args, d.Name) + + out, err := zfs(args...) + if err != nil { + return nil, err + } + + var datasets []*Dataset + name := "" + var ds *Dataset + for _, line := range out { + if name != line[0] { + name = line[0] + ds = &Dataset{Name: name} + datasets = append(datasets, ds) + } + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + return datasets[1:], nil +} + +// Diff returns changes between a snapshot and the given ZFS dataset. +// The snapshot name must include the filesystem part as it is possible to +// compare clones with their origin snapshots. +func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) { + args := []string{"diff", "-FH", snapshot, d.Name}[:] + out, err := zfs(args...) + if err != nil { + return nil, err + } + inodeChanges, err := parseInodeChanges(out) + if err != nil { + return nil, err + } + return inodeChanges, nil +} diff --git a/vendor/src/github.com/mistifyio/go-zfs/zfs_test.go b/vendor/src/github.com/mistifyio/go-zfs/zfs_test.go new file mode 100644 index 0000000000..e991a5cffc --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/zfs_test.go @@ -0,0 +1,357 @@ +package zfs_test + +import ( + "fmt" + "io/ioutil" + "math" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + "time" + + "github.com/mistifyio/go-zfs" +) + +func sleep(delay int) { + time.Sleep(time.Duration(delay) * time.Second) +} + +func pow2(x int) int64 { + return int64(math.Pow(2, float64(x))) +} + +//https://github.com/benbjohnson/testing +// assert fails the test if the condition is false. +func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { + if !condition { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) + tb.FailNow() + } +} + +// ok fails the test if an err is not nil. +func ok(tb testing.TB, err error) { + if err != nil { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) + tb.FailNow() + } +} + +// equals fails the test if exp is not equal to act. +func equals(tb testing.TB, exp, act interface{}) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + tb.FailNow() + } +} + +func zpoolTest(t *testing.T, fn func()) { + tempfiles := make([]string, 3) + for i := range tempfiles { + f, _ := ioutil.TempFile("/tmp/", "zfs-") + defer f.Close() + err := f.Truncate(pow2(30)) + ok(t, err) + tempfiles[i] = f.Name() + defer os.Remove(f.Name()) + } + + pool, err := zfs.CreateZpool("test", nil, tempfiles...) 
+ ok(t, err) + defer pool.Destroy() + ok(t, err) + fn() + +} + +func TestDatasets(t *testing.T) { + zpoolTest(t, func() { + _, err := zfs.Datasets("") + ok(t, err) + + ds, err := zfs.GetDataset("test") + ok(t, err) + equals(t, zfs.DatasetFilesystem, ds.Type) + equals(t, "", ds.Origin) + assert(t, ds.Logicalused > 0, "Logicalused is not greater than 0") + }) +} + +func TestSnapshots(t *testing.T) { + + zpoolTest(t, func() { + snapshots, err := zfs.Snapshots("") + ok(t, err) + + for _, snapshot := range snapshots { + equals(t, zfs.DatasetSnapshot, snapshot.Type) + } + }) +} + +func TestFilesystems(t *testing.T) { + zpoolTest(t, func() { + f, err := zfs.CreateFilesystem("test/filesystem-test", nil) + ok(t, err) + + filesystems, err := zfs.Filesystems("") + ok(t, err) + + for _, filesystem := range filesystems { + equals(t, zfs.DatasetFilesystem, filesystem.Type) + } + + ok(t, f.Destroy(zfs.DestroyDefault)) + }) +} + +func TestCreateFilesystemWithProperties(t *testing.T) { + zpoolTest(t, func() { + props := map[string]string{ + "compression": "lz4", + } + + f, err := zfs.CreateFilesystem("test/filesystem-test", props) + ok(t, err) + + equals(t, "lz4", f.Compression) + + filesystems, err := zfs.Filesystems("") + ok(t, err) + + for _, filesystem := range filesystems { + equals(t, zfs.DatasetFilesystem, filesystem.Type) + } + + ok(t, f.Destroy(zfs.DestroyDefault)) + }) +} + +func TestVolumes(t *testing.T) { + zpoolTest(t, func() { + v, err := zfs.CreateVolume("test/volume-test", uint64(pow2(23)), nil) + ok(t, err) + + // volumes are sometimes "busy" if you try to manipulate them right away + sleep(1) + + equals(t, zfs.DatasetVolume, v.Type) + volumes, err := zfs.Volumes("") + ok(t, err) + + for _, volume := range volumes { + equals(t, zfs.DatasetVolume, volume.Type) + } + + ok(t, v.Destroy(zfs.DestroyDefault)) + }) +} + +func TestSnapshot(t *testing.T) { + zpoolTest(t, func() { + f, err := zfs.CreateFilesystem("test/snapshot-test", nil) + ok(t, err) + + filesystems, err := zfs.Filesystems("") + ok(t, err) + + for _, filesystem := range filesystems { + equals(t, zfs.DatasetFilesystem, filesystem.Type) + } + + s, err := f.Snapshot("test", false) + ok(t, err) + + equals(t, zfs.DatasetSnapshot, s.Type) + + equals(t, "test/snapshot-test@test", s.Name) + + ok(t, s.Destroy(zfs.DestroyDefault)) + + ok(t, f.Destroy(zfs.DestroyDefault)) + }) +} + +func TestClone(t *testing.T) { + zpoolTest(t, func() { + f, err := zfs.CreateFilesystem("test/snapshot-test", nil) + ok(t, err) + + filesystems, err := zfs.Filesystems("") + ok(t, err) + + for _, filesystem := range filesystems { + equals(t, zfs.DatasetFilesystem, filesystem.Type) + } + + s, err := f.Snapshot("test", false) + ok(t, err) + + equals(t, zfs.DatasetSnapshot, s.Type) + equals(t, "test/snapshot-test@test", s.Name) + + c, err := s.Clone("test/clone-test", nil) + ok(t, err) + + equals(t, zfs.DatasetFilesystem, c.Type) + + ok(t, c.Destroy(zfs.DestroyDefault)) + + ok(t, s.Destroy(zfs.DestroyDefault)) + + ok(t, f.Destroy(zfs.DestroyDefault)) + }) +} + +func TestSendSnapshot(t *testing.T) { + zpoolTest(t, func() { + f, err := zfs.CreateFilesystem("test/snapshot-test", nil) + ok(t, err) + + filesystems, err := zfs.Filesystems("") + ok(t, err) + + for _, filesystem := range filesystems { + equals(t, zfs.DatasetFilesystem, filesystem.Type) + } + + s, err := f.Snapshot("test", false) + ok(t, err) + + file, _ := ioutil.TempFile("/tmp/", "zfs-") + defer file.Close() + err = file.Truncate(pow2(30)) + ok(t, err) + defer os.Remove(file.Name()) + + err = 
s.SendSnapshot(file) + ok(t, err) + + ok(t, s.Destroy(zfs.DestroyDefault)) + + ok(t, f.Destroy(zfs.DestroyDefault)) + }) +} + +func TestChildren(t *testing.T) { + zpoolTest(t, func() { + f, err := zfs.CreateFilesystem("test/snapshot-test", nil) + ok(t, err) + + s, err := f.Snapshot("test", false) + ok(t, err) + + equals(t, zfs.DatasetSnapshot, s.Type) + equals(t, "test/snapshot-test@test", s.Name) + + children, err := f.Children(0) + ok(t, err) + + equals(t, 1, len(children)) + equals(t, "test/snapshot-test@test", children[0].Name) + + ok(t, s.Destroy(zfs.DestroyDefault)) + ok(t, f.Destroy(zfs.DestroyDefault)) + }) +} + +func TestListZpool(t *testing.T) { + zpoolTest(t, func() { + _, err := zfs.ListZpools() + ok(t, err) + }) +} + +func TestRollback(t *testing.T) { + zpoolTest(t, func() { + f, err := zfs.CreateFilesystem("test/snapshot-test", nil) + ok(t, err) + + filesystems, err := zfs.Filesystems("") + ok(t, err) + + for _, filesystem := range filesystems { + equals(t, zfs.DatasetFilesystem, filesystem.Type) + } + + s1, err := f.Snapshot("test", false) + ok(t, err) + + _, err = f.Snapshot("test2", false) + ok(t, err) + + s3, err := f.Snapshot("test3", false) + ok(t, err) + + err = s3.Rollback(false) + ok(t, err) + + err = s1.Rollback(false) + assert(t, ok != nil, "should error when rolling back beyond most recent without destroyMoreRecent = true") + + err = s1.Rollback(true) + ok(t, err) + + ok(t, s1.Destroy(zfs.DestroyDefault)) + + ok(t, f.Destroy(zfs.DestroyDefault)) + }) +} + +func TestDiff(t *testing.T) { + zpoolTest(t, func() { + fs, err := zfs.CreateFilesystem("test/origin", nil) + ok(t, err) + + linkedFile, err := os.Create(filepath.Join(fs.Mountpoint, "linked")) + ok(t, err) + + movedFile, err := os.Create(filepath.Join(fs.Mountpoint, "file")) + ok(t, err) + + snapshot, err := fs.Snapshot("snapshot", false) + ok(t, err) + + unicodeFile, err := os.Create(filepath.Join(fs.Mountpoint, "i ❤ unicode")) + ok(t, err) + + err = os.Rename(movedFile.Name(), movedFile.Name()+"-new") + ok(t, err) + + err = os.Link(linkedFile.Name(), linkedFile.Name()+"_hard") + ok(t, err) + + inodeChanges, err := fs.Diff(snapshot.Name) + ok(t, err) + equals(t, 4, len(inodeChanges)) + + equals(t, "/test/origin/", inodeChanges[0].Path) + equals(t, zfs.Directory, inodeChanges[0].Type) + equals(t, zfs.Modified, inodeChanges[0].Change) + + equals(t, "/test/origin/linked", inodeChanges[1].Path) + equals(t, zfs.File, inodeChanges[1].Type) + equals(t, zfs.Modified, inodeChanges[1].Change) + equals(t, 1, inodeChanges[1].ReferenceCountChange) + + equals(t, "/test/origin/file", inodeChanges[2].Path) + equals(t, "/test/origin/file-new", inodeChanges[2].NewPath) + equals(t, zfs.File, inodeChanges[2].Type) + equals(t, zfs.Renamed, inodeChanges[2].Change) + + equals(t, "/test/origin/i ❤ unicode", inodeChanges[3].Path) + equals(t, zfs.File, inodeChanges[3].Type) + equals(t, zfs.Created, inodeChanges[3].Change) + + ok(t, movedFile.Close()) + ok(t, unicodeFile.Close()) + ok(t, linkedFile.Close()) + ok(t, snapshot.Destroy(zfs.DestroyForceUmount)) + ok(t, fs.Destroy(zfs.DestroyForceUmount)) + }) +} diff --git a/vendor/src/github.com/mistifyio/go-zfs/zpool.go b/vendor/src/github.com/mistifyio/go-zfs/zpool.go new file mode 100644 index 0000000000..59be0a84ce --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/zpool.go @@ -0,0 +1,108 @@ +package zfs + +// ZFS zpool states, which can indicate if a pool is online, offline, +// degraded, etc. 
More information regarding zpool states can be found here: +// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html. +const ( + ZpoolOnline = "ONLINE" + ZpoolDegraded = "DEGRADED" + ZpoolFaulted = "FAULTED" + ZpoolOffline = "OFFLINE" + ZpoolUnavail = "UNAVAIL" + ZpoolRemoved = "REMOVED" +) + +// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can +// contain many descendent datasets. +type Zpool struct { + Name string + Health string + Allocated uint64 + Size uint64 + Free uint64 +} + +// zpool is a helper function to wrap typical calls to zpool. +func zpool(arg ...string) ([][]string, error) { + c := command{Command: "zpool"} + return c.Run(arg...) +} + +// GetZpool retrieves a single ZFS zpool by name. +func GetZpool(name string) (*Zpool, error) { + out, err := zpool("get", "all", "-p", name) + if err != nil { + return nil, err + } + + // there is no -H + out = out[1:] + + z := &Zpool{Name: name} + for _, line := range out { + if err := z.parseLine(line); err != nil { + return nil, err + } + } + + return z, nil +} + +// Datasets returns a slice of all ZFS datasets in a zpool. +func (z *Zpool) Datasets() ([]*Dataset, error) { + return Datasets(z.Name) +} + +// Snapshots returns a slice of all ZFS snapshots in a zpool. +func (z *Zpool) Snapshots() ([]*Dataset, error) { + return Snapshots(z.Name) +} + +// CreateZpool creates a new ZFS zpool with the specified name, properties, +// and optional arguments. +// A full list of available ZFS properties and command-line arguments may be +// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8). +func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) { + cli := make([]string, 1, 4) + cli[0] = "create" + if properties != nil { + cli = append(cli, propsSlice(properties)...) + } + cli = append(cli, name) + cli = append(cli, args...) + _, err := zpool(cli...) + if err != nil { + return nil, err + } + + return &Zpool{Name: name}, nil +} + +// Destroy destroys a ZFS zpool by name. +func (z *Zpool) Destroy() error { + _, err := zpool("destroy", z.Name) + return err +} + +// ListZpools list all ZFS zpools accessible on the current system. +func ListZpools() ([]*Zpool, error) { + args := []string{"list", "-Ho", "name"} + out, err := zpool(args...) 
+ if err != nil { + return nil, err + } + + // there is no -H + out = out[1:] + + var pools []*Zpool + + for _, line := range out { + z, err := GetZpool(line[0]) + if err != nil { + return nil, err + } + pools = append(pools, z) + } + return pools, nil +} From bacecabf3be2d6b180cbe8fc37ff90f79138bb6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Fri, 20 Mar 2015 00:28:17 +0100 Subject: [PATCH 2/9] zfs: revert to NaiveGraphDriver for the moment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörg Thalheim --- daemon/graphdriver/zfs/zfs.go | 104 +--------------------------------- 1 file changed, 3 insertions(+), 101 deletions(-) diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go index 8873c4736f..9d4727870e 100644 --- a/daemon/graphdriver/zfs/zfs.go +++ b/daemon/graphdriver/zfs/zfs.go @@ -21,8 +21,6 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/parsers" zfs "github.com/mistifyio/go-zfs" ) @@ -84,10 +82,11 @@ func Init(base string, opt []string) (graphdriver.Driver, error) { return nil, fmt.Errorf("Cannot open %s", options.fsName) } - return &Driver{ + d := &Driver{ dataset: dataset, options: options, - }, nil + } + return graphdriver.NaiveDiffDriver(d), nil } func parseOptions(opt []string) (ZfsOptions, error) { @@ -276,100 +275,3 @@ func (d *Driver) Exists(id string) bool { _, err := zfs.GetDataset(d.ZfsPath(id)) return err == nil } - -func zfsChanges(dataset *zfs.Dataset) ([]archive.Change, error) { - if dataset.Origin == "" { // should never happen - return nil, fmt.Errorf("no origin found for dataset '%s'. 
expected a clone", dataset.Name) - } - changes, err := dataset.Diff(dataset.Origin) - if err != nil { - return nil, err - } - - // for rename changes, we have to add a ADD and a REMOVE - renameCount := 0 - for _, change := range changes { - if change.Change == zfs.Renamed { - renameCount++ - } - } - archiveChanges := make([]archive.Change, len(changes)+renameCount) - i := 0 - for _, change := range changes { - var changeType archive.ChangeType - mountpointLen := len(dataset.Mountpoint) - basePath := change.Path[mountpointLen:] - switch change.Change { - case zfs.Renamed: - archiveChanges[i] = archive.Change{basePath, archive.ChangeDelete} - newBasePath := change.NewPath[mountpointLen:] - archiveChanges[i+1] = archive.Change{newBasePath, archive.ChangeAdd} - i += 2 - continue - case zfs.Created: - changeType = archive.ChangeAdd - case zfs.Modified: - changeType = archive.ChangeModify - case zfs.Removed: - changeType = archive.ChangeDelete - } - archiveChanges[i] = archive.Change{basePath, changeType} - i++ - } - - return archiveChanges, nil -} - -func (d *Driver) Diff(id, parent string) (archive.Archive, error) { - dataset, err := zfs.GetDataset(d.ZfsPath(id)) - if err != nil { - return nil, err - } - changes, err := zfsChanges(dataset) - if err != nil { - return nil, err - } - - archive, err := archive.ExportChanges(dataset.Mountpoint, changes) - if err != nil { - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - d.Put(id) - return err - }), nil -} - -func (d *Driver) DiffSize(id, parent string) (bytes int64, err error) { - dataset, err := zfs.GetDataset(d.ZfsPath(id)) - if err == nil { - return int64((*dataset).Logicalused), nil - } else { - return -1, err - } -} - -func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { - dataset, err := zfs.GetDataset(d.ZfsPath(id)) - if err != nil { - return nil, err - } - return zfsChanges(dataset) -} - -func (d *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (int64, error) { - dataset, err := zfs.GetDataset(d.ZfsPath(id)) - if err != nil { - return -1, err - } - _, err = archive.ApplyLayer(dataset.Mountpoint, diff) - if err != nil { - return -1, err - } - updatedDataset, err := zfs.GetDataset(d.ZfsPath(id)) - if err != nil { - return -1, err - } - return int64(updatedDataset.Logicalused), nil -} From bdf784ae4ae62a560f28e974ff9b9c26aaf8a533 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 2 Apr 2015 21:56:18 +0200 Subject: [PATCH 3/9] docs: move zfs.fsname option to storage option section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörg Thalheim --- docs/sources/reference/commandline/cli.md | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index aa0b1fd2d5..c443617b4f 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -275,8 +275,7 @@ share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_p The `zfs` driver is probably not fast as `btrfs` but has a longer track record on stability. Thanks to `Single Copy ARC` shared blocks between clones will be cached only once. Use `docker -d -s zfs`. 
To select a different zfs filesystem -as backingstore use the storage option `zfs.fsname`: -`docker -d -s zfs --storage-opt zfs.fsname=zroot/docker` +set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options): The `overlay` is a very fast union filesystem. It is now merged in the main Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). @@ -288,10 +287,10 @@ Call `docker -d -s overlay` to use it. #### Storage driver options Particular storage-driver can be configured with options specified with -`--storage-opt` flags. The only driver accepting options is `devicemapper` as -of now. All its options are prefixed with `dm`. +`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm` and +options for `zfs` start with `zfs`. -Currently supported options are: +Currently supported options of `devicemapper`: * `dm.basesize` @@ -450,6 +449,17 @@ Currently supported options are: > daemon with a supported environment. ### Docker execdriver option +Currently supported options of `zfs`: + + * `zfs.fsname` + + Set zfs filesystem under which docker will create its own datasets. + By default docker will pick up the zfs filesystem where docker graph + (`/var/lib/docker`) is located. + + Example use: + + $ docker -d -s zfs --storage-opt zfs.fsname=zroot/docker The Docker daemon uses a specifically built `libcontainer` execution driver as its interface to the Linux kernel `namespaces`, `cgroups`, and `SELinux`. From a25195d86cb54cbad0bfe840267beaa9f0a84c74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Fri, 3 Apr 2015 07:24:08 +0200 Subject: [PATCH 4/9] zfs: add myself to MAINTAINERS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörg Thalheim --- daemon/graphdriver/zfs/MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/daemon/graphdriver/zfs/MAINTAINERS b/daemon/graphdriver/zfs/MAINTAINERS index 88c4bdfaf7..9c270c541f 100644 --- a/daemon/graphdriver/zfs/MAINTAINERS +++ b/daemon/graphdriver/zfs/MAINTAINERS @@ -1 +1,2 @@ +Jörg Thalheim (@Mic92) Arthur Gautier (@baloose) From dd614b5e34bc82c6921c44619532f2467ce43baa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Mon, 13 Apr 2015 14:21:46 +0200 Subject: [PATCH 5/9] zfs: refactor error handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit thanks to @calavera Signed-off-by: Jörg Thalheim --- daemon/graphdriver/zfs/zfs.go | 97 ++++++++++++++++++----------------- 1 file changed, 49 insertions(+), 48 deletions(-) diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go index 9d4727870e..12e0dd8e9c 100644 --- a/daemon/graphdriver/zfs/zfs.go +++ b/daemon/graphdriver/zfs/zfs.go @@ -1,10 +1,12 @@ package zfs /* -#include #include #include #include + +const char* PROC_MOUNTS = "/proc/mounts"; +const char* OPEN_MODE = "r"; */ import "C" @@ -43,10 +45,10 @@ func (*Logger) Log(cmd []string) { func Init(base string, opt []string) (graphdriver.Driver, error) { var err error options, err := parseOptions(opt) - options.mountPath = base if err != nil { return nil, err } + options.mountPath = base rootdir := path.Dir(base) @@ -58,14 +60,14 @@ func Init(base string, opt []string) (graphdriver.Driver, error) { } if _, err := exec.LookPath("zfs"); err != nil { - return nil, fmt.Errorf("zfs command is not available") + return nil, fmt.Errorf("zfs command is not available: %v", err) } file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) - defer 
file.Close() if err != nil { - return nil, fmt.Errorf("Failed to initialize: %v", err) + return nil, fmt.Errorf("cannot open /dev/zfs: %v", err) } + defer file.Close() if options.fsName == "" { options.fsName, err = lookupZfsDataset(rootdir) @@ -74,8 +76,7 @@ func Init(base string, opt []string) (graphdriver.Driver, error) { } } - logger := Logger{} - zfs.SetLogger(&logger) + zfs.SetLogger(new(Logger)) dataset, err := zfs.GetDataset(options.fsName) if err != nil { @@ -102,7 +103,7 @@ func parseOptions(opt []string) (ZfsOptions, error) { case "zfs.fsname": options.fsName = val default: - return options, fmt.Errorf("Unknown option %s\n", key) + return options, fmt.Errorf("Unknown option %s", key) } } return options, nil @@ -111,7 +112,7 @@ func parseOptions(opt []string) (ZfsOptions, error) { func checkRootdirFs(rootdir string) error { var buf syscall.Statfs_t if err := syscall.Statfs(rootdir, &buf); err != nil { - fmt.Errorf("Failed to access '%s': %s", rootdir, err) + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) } if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { @@ -121,27 +122,24 @@ func checkRootdirFs(rootdir string) error { return nil } -var CprocMounts = C.CString("/proc/mounts") -var CopenMod = C.CString("r") - func lookupZfsDataset(rootdir string) (string, error) { var stat syscall.Stat_t - var Cmnt C.struct_mntent - var Cfp *C.FILE - buf := string(make([]byte, 256, 256)) - Cbuf := C.CString(buf) - defer free(Cbuf) - if err := syscall.Stat(rootdir, &stat); err != nil { return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) } wantedDev := stat.Dev - if Cfp = C.setmntent(CprocMounts, CopenMod); Cfp == nil { - return "", fmt.Errorf("Failed to open /proc/mounts") + Cfp, err := C.setmntent(C.PROC_MOUNTS, C.OPEN_MODE) + if err != nil { + return "", fmt.Errorf("Failed to open /proc/mounts: %v", err) } defer C.endmntent(Cfp) + var Cmnt C.struct_mntent + buf := string(make([]byte, 256, 256)) + Cbuf := C.CString(buf) + defer C.free(unsafe.Pointer(Cbuf)) + for C.getmntent_r(Cfp, &Cmnt, Cbuf, 256) != nil { dir := C.GoString(Cmnt.mnt_dir) if err := syscall.Stat(dir, &stat); err != nil { @@ -154,12 +152,8 @@ func lookupZfsDataset(rootdir string) (string, error) { return C.GoString(Cmnt.mnt_fsname), nil } } - // should never happen - return "", fmt.Errorf("Failed to find zfs pool in /proc/mounts") -} -func free(p *C.char) { - C.free(unsafe.Pointer(p)) + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) } type Driver struct { @@ -179,21 +173,23 @@ func (d *Driver) Status() [][2]string { parts := strings.Split(d.dataset.Name, "/") pool, err := zfs.GetZpool(parts[0]) - if err != nil { - return [][2]string{ - {"error while getting pool", fmt.Sprintf("%v", err)}, - } - } - var quota string - if d.dataset.Quota == 0 { - quota = strconv.FormatUint(d.dataset.Quota, 10) + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health } else { - quota = "no" + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) } return [][2]string{ - {"Zpool", pool.Name}, - {"Zpool Health", pool.Health}, + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, {"Parent Dataset", d.dataset.Name}, {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, @@ -204,12 +200,12 @@ func (d *Driver) 
Status() [][2]string { func cloneFilesystem(id, parent, mountpoint string) error { parentDataset, err := zfs.GetDataset(parent) - if parentDataset == nil { + if err != nil { return err } snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) - if snapshot == nil { + if err != nil { return err } @@ -220,8 +216,7 @@ func cloneFilesystem(id, parent, mountpoint string) error { snapshot.Destroy(zfs.DestroyDeferDeletion) return err } - err = snapshot.Destroy(zfs.DestroyDeferDeletion) - return err + return snapshot.Destroy(zfs.DestroyDeferDeletion) } func (d *Driver) ZfsPath(id string) string { @@ -229,23 +224,30 @@ func (d *Driver) ZfsPath(id string) string { } func (d *Driver) Create(id string, parent string) error { - mountPoint := path.Join(d.options.mountPath, "graph", id) datasetName := d.ZfsPath(id) dataset, err := zfs.GetDataset(datasetName) if err == nil { // cleanup existing dataset from an aborted build - dataset.Destroy(zfs.DestroyRecursiveClones) + err := dataset.Destroy(zfs.DestroyRecursiveClones) + if err != nil { + log.Warnf("[zfs] failed to destroy dataset '%s': %v", dataset.Name, err) + } + } else if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset does not exist\n") { + return err + } + } else { + return err } + mountPoint := path.Join(d.options.mountPath, "graph", id) if parent == "" { _, err := zfs.CreateFilesystem(datasetName, map[string]string{ "mountpoint": mountPoint, }) return err - } else { - return cloneFilesystem(datasetName, d.ZfsPath(parent), mountPoint) } - return nil + return cloneFilesystem(datasetName, d.ZfsPath(parent), mountPoint) } func (d *Driver) Remove(id string) error { @@ -259,11 +261,10 @@ func (d *Driver) Remove(id string) error { func (d *Driver) Get(id, mountLabel string) (string, error) { dataset, err := zfs.GetDataset(d.ZfsPath(id)) - if dataset == nil { + if err != nil { return "", err - } else { - return dataset.Mountpoint, nil } + return dataset.Mountpoint, nil } func (d *Driver) Put(id string) error { From 30f3bd643d5f6706bbaa9559e781b036661f149e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Wed, 15 Apr 2015 08:39:15 +0200 Subject: [PATCH 6/9] integration: add variable to set storage options for testing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jörg Thalheim --- Makefile | 1 + hack/make/.integration-daemon-start | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/Makefile b/Makefile index d13960229a..b98424b6c0 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,7 @@ DOCKER_ENVS := \ -e DOCKER_CLIENTONLY \ -e DOCKER_EXECDRIVER \ -e DOCKER_GRAPHDRIVER \ + -e DOCKER_STORAGE_OPTS \ -e TESTDIRS \ -e TESTFLAGS \ -e TIMEOUT diff --git a/hack/make/.integration-daemon-start b/hack/make/.integration-daemon-start index 937979df3e..41f116f461 100644 --- a/hack/make/.integration-daemon-start +++ b/hack/make/.integration-daemon-start @@ -16,6 +16,16 @@ export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} export DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native} export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + if [ -z "$DOCKER_TEST_HOST" ]; then export DOCKER_HOST="unix://$(cd "$DEST" 
&& pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one
 	( set -x; exec \
@@ -25,6 +35,7 @@ if [ -z "$DOCKER_TEST_HOST" ]; then
 		--exec-driver "$DOCKER_EXECDRIVER" \
 		--pidfile "$DEST/docker.pid" \
 		--userland-proxy="$DOCKER_USERLANDPROXY" \
+			$storage_params \
 		&> "$DEST/docker.log"
 	) &
 	trap "source '${MAKEDIR}/.integration-daemon-stop'" EXIT # make sure that if the script exits unexpectedly, we stop this daemon we just started

From ee00f07ea64074e8abf7d741eed8a83da4e4b5ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Thu, 16 Apr 2015 13:06:44 +0200
Subject: [PATCH 7/9] zfs: replace c for /proc/mounts parsing with go
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Jörg Thalheim
---
 daemon/graphdriver/zfs/zfs.go | 35 ++++++++--------------------------
 1 file changed, 8 insertions(+), 27 deletions(-)

diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go
index 12e0dd8e9c..1e0a703497 100644
--- a/daemon/graphdriver/zfs/zfs.go
+++ b/daemon/graphdriver/zfs/zfs.go
@@ -1,15 +1,5 @@
 package zfs
 
-/*
-#include
-#include
-#include
-
-const char* PROC_MOUNTS = "/proc/mounts";
-const char* OPEN_MODE = "r";
-*/
-import "C"
-
 import (
 	"fmt"
 	"os"
@@ -19,10 +9,10 @@ import (
 	"strings"
 	"syscall"
 	"time"
-	"unsafe"
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/parsers"
 	zfs "github.com/mistifyio/go-zfs"
 )
@@ -129,27 +119,18 @@ func lookupZfsDataset(rootdir string) (string, error) {
 	}
 	wantedDev := stat.Dev
 
-	Cfp, err := C.setmntent(C.PROC_MOUNTS, C.OPEN_MODE)
+	mounts, err := mount.GetMounts()
 	if err != nil {
-		return "", fmt.Errorf("Failed to open /proc/mounts: %v", err)
+		return "", err
 	}
-	defer C.endmntent(Cfp)
-
-	var Cmnt C.struct_mntent
-	buf := string(make([]byte, 256, 256))
-	Cbuf := C.CString(buf)
-	defer C.free(unsafe.Pointer(Cbuf))
-
-	for C.getmntent_r(Cfp, &Cmnt, Cbuf, 256) != nil {
-		dir := C.GoString(Cmnt.mnt_dir)
-		if err := syscall.Stat(dir, &stat); err != nil {
-			log.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", dir, err)
+	for _, m := range mounts {
+		if err := syscall.Stat(m.Mountpoint, &stat); err != nil {
+			log.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
 			continue // may fail on fuse file systems
 		}
-		fs := C.GoString(Cmnt.mnt_type)
-		if stat.Dev == wantedDev && fs == "zfs" {
-			return C.GoString(Cmnt.mnt_fsname), nil
+		if stat.Dev == wantedDev && m.Fstype == "zfs" {
+			return m.Source, nil
 		}
 	}

From 11e9167a6b45fdc134ee43e89abefd34a85cf624 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Thu, 16 Apr 2015 14:05:43 +0200
Subject: [PATCH 8/9] zfs: improve performance by using legacy mounts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Instead of letting zfs mount datasets automatically, mount them on demand
using mount(2). This speeds up the graph driver in two ways:

- fewer zfs processes are needed to start a container
- /proc/mounts stays smaller, so the zfs userspace tools have less to read
  (which can be a significant amount of data as the number of layers grows)

This way it is also ensured that the correct mountpoint is always used.
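For illustration, the per-layer flow now looks roughly like this (a sketch
only; the pool, dataset, and path names below are made up, not taken from
this patch):

    # the clone is created with mountpoint=legacy, so zfs no longer auto-mounts it
    zfs clone -o mountpoint=legacy zroot/docker/<parent>@<snapshot> zroot/docker/<id>

    # Get(): mount the layer on demand where the docker graph expects it
    mount -t zfs zroot/docker/<id> /var/lib/docker/graph/<id>

    # Put(): unmount it again once the layer is no longer in use
    umount /var/lib/docker/graph/<id>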
Signed-off-by: Jörg Thalheim --- daemon/graphdriver/zfs/zfs.go | 80 +++++++++++++++++++++-------------- 1 file changed, 48 insertions(+), 32 deletions(-) diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go index 1e0a703497..9f6734950a 100644 --- a/daemon/graphdriver/zfs/zfs.go +++ b/daemon/graphdriver/zfs/zfs.go @@ -179,20 +179,15 @@ func (d *Driver) Status() [][2]string { } } -func cloneFilesystem(id, parent, mountpoint string) error { - parentDataset, err := zfs.GetDataset(parent) - if err != nil { - return err - } +func cloneFilesystem(name, parentName string) error { snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) if err != nil { return err } - _, err = snapshot.Clone(id, map[string]string{ - "mountpoint": mountpoint, - }) + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) if err != nil { snapshot.Destroy(zfs.DestroyDeferDeletion) return err @@ -204,52 +199,73 @@ func (d *Driver) ZfsPath(id string) string { return d.options.fsName + "/" + id } +func (d *Driver) MountPath(id string) string { + return path.Join(d.options.mountPath, "graph", id) +} + func (d *Driver) Create(id string, parent string) error { - datasetName := d.ZfsPath(id) - dataset, err := zfs.GetDataset(datasetName) + err := d.create(id, parent) if err == nil { - // cleanup existing dataset from an aborted build - err := dataset.Destroy(zfs.DestroyRecursiveClones) - if err != nil { - log.Warnf("[zfs] failed to destroy dataset '%s': %v", dataset.Name, err) - } - } else if zfsError, ok := err.(*zfs.Error); ok { - if !strings.HasSuffix(zfsError.Stderr, "dataset does not exist\n") { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { return err } + // aborted build -> cleanup } else { return err } - mountPoint := path.Join(d.options.mountPath, "graph", id) - if parent == "" { - _, err := zfs.CreateFilesystem(datasetName, map[string]string{ - "mountpoint": mountPoint, - }) + dataset := zfs.Dataset{Name: d.ZfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { return err } - return cloneFilesystem(datasetName, d.ZfsPath(parent), mountPoint) + + // retry + return d.create(id, parent) +} + +func (d *Driver) create(id, parent string) error { + name := d.ZfsPath(id) + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + _, err := zfs.CreateFilesystem(name, mountoptions) + return err + } + return cloneFilesystem(name, d.ZfsPath(parent)) } func (d *Driver) Remove(id string) error { - dataset, err := zfs.GetDataset(d.ZfsPath(id)) - if dataset == nil { - return err - } - + dataset := zfs.Dataset{Name: d.ZfsPath(id)} return dataset.Destroy(zfs.DestroyRecursive) } func (d *Driver) Get(id, mountLabel string) (string, error) { - dataset, err := zfs.GetDataset(d.ZfsPath(id)) - if err != nil { + mountpoint := d.MountPath(id) + filesystem := d.ZfsPath(id) + log.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, mountLabel) + + // Create the target directories if they don't exist + if err := os.MkdirAll(mountpoint, 0755); err != nil && !os.IsExist(err) { return "", err } - return dataset.Mountpoint, nil + + err := mount.Mount(filesystem, mountpoint, "zfs", mountLabel) + if err != nil { + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + return mountpoint, nil } func 
(d *Driver) Put(id string) error {
-	// FS is already mounted
+	mountpoint := d.MountPath(id)
+	log.Debugf(`[zfs] unmount("%s")`, mountpoint)
+
+	if err := mount.Unmount(mountpoint); err != nil {
+		return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
+	}
 	return nil
 }

From bad25ccf978b56da6fa181439504ab33906524cd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Thalheim?=
Date: Sat, 18 Apr 2015 00:14:47 +0200
Subject: [PATCH 9/9] zfs: retrieve all filesystems on startup at once
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The docker graph calls driver.Exists() on initialisation for each
filesystem in the graph, which results in a lot of `zfs get all`
commands. To reduce this, retrieve all descendent filesystems at
startup and cache them for later checks.

Signed-off-by: Jörg Thalheim
---
 daemon/graphdriver/zfs/zfs.go | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go
index 9f6734950a..5126f87de3 100644
--- a/daemon/graphdriver/zfs/zfs.go
+++ b/daemon/graphdriver/zfs/zfs.go
@@ -68,14 +68,28 @@ func Init(base string, opt []string) (graphdriver.Driver, error) {
 
 	zfs.SetLogger(new(Logger))
 
-	dataset, err := zfs.GetDataset(options.fsName)
+	filesystems, err := zfs.Filesystems(options.fsName)
 	if err != nil {
-		return nil, fmt.Errorf("Cannot open %s", options.fsName)
+		return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+	}
+
+	filesystemsCache := make(map[string]bool, len(filesystems))
+	var rootDataset *zfs.Dataset
+	for _, fs := range filesystems {
+		if fs.Name == options.fsName {
+			rootDataset = fs
+		}
+		filesystemsCache[fs.Name] = true
+	}
+
+	if rootDataset == nil {
+		return nil, fmt.Errorf("BUG: zfs get all -t filesystems -rHp '%s' should contain '%s'", options.fsName, options.fsName)
 	}
 
 	d := &Driver{
-		dataset: dataset,
-		options: options,
+		dataset:          rootDataset,
+		options:          options,
+		filesystemsCache: filesystemsCache,
 	}
 	return graphdriver.NaiveDiffDriver(d), nil
 }
@@ -138,8 +152,9 @@ func lookupZfsDataset(rootdir string) (string, error) {
 }
 
 type Driver struct {
-	dataset *zfs.Dataset
-	options ZfsOptions
+	dataset          *zfs.Dataset
+	options          ZfsOptions
+	filesystemsCache map[string]bool
 }
 
 func (d *Driver) String() string {
@@ -270,6 +285,5 @@ func (d *Driver) Put(id string) error {
 }
 
 func (d *Driver) Exists(id string) bool {
-	_, err := zfs.GetDataset(d.ZfsPath(id))
-	return err == nil
+	return d.filesystemsCache[d.ZfsPath(id)] == true
 }
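For completeness, the pieces above can be exercised together roughly as
follows (a sketch only: the pool/dataset name `zroot/docker` and the
`test-integration-cli` target are assumptions used for illustration, not
something these patches define):

    # dedicate a dataset to docker and start the daemon on it (see PATCH 3/9)
    $ zfs create zroot/docker
    $ docker -d -s zfs --storage-opt zfs.fsname=zroot/docker

    # run the integration suite against the zfs graph driver (see PATCH 6/9)
    $ DOCKER_GRAPHDRIVER=zfs DOCKER_STORAGE_OPTS="zfs.fsname=zroot/docker" make test-integration-cli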