vendor: remove boltdb dependency which is superseded by bbolt

This also brings in these PRs from swarmkit:
- https://github.com/docker/swarmkit/pull/2691
- https://github.com/docker/swarmkit/pull/2744
- https://github.com/docker/swarmkit/pull/2732
- https://github.com/docker/swarmkit/pull/2729
- https://github.com/docker/swarmkit/pull/2748

Signed-off-by: Tibor Vass <tibor@docker.com>
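
Since bbolt is an API-compatible fork of boltdb/bolt, dropping the old package only requires an import-path change in consumers; this commit touches vendoring, not call sites. A minimal sketch of what the swap looks like for a hypothetical consumer (the import alias is an assumption, used to keep existing identifiers compiling):

```go
package main

import (
	"log"

	// Previously: "github.com/boltdb/bolt". Aliasing the maintained fork
	// to the same package name keeps existing call sites unchanged.
	bolt "go.etcd.io/bbolt"
)

func main() {
	// Same Open/View/Update API as the removed boltdb/bolt package.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```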
Tibor Vass 6 years ago
Parent
Commit
cce1763d57
100 changed files with 3247 additions and 6759 deletions
  1. daemon/cluster/noderunner.go (+8, -12)
  2. vendor.conf (+6, -6)
  3. vendor/github.com/boltdb/bolt/LICENSE (+0, -20)
  4. vendor/github.com/boltdb/bolt/README.md (+0, -857)
  5. vendor/github.com/boltdb/bolt/bolt_386.go (+0, -10)
  6. vendor/github.com/boltdb/bolt/bolt_amd64.go (+0, -10)
  7. vendor/github.com/boltdb/bolt/bolt_arm.go (+0, -28)
  8. vendor/github.com/boltdb/bolt/bolt_arm64.go (+0, -12)
  9. vendor/github.com/boltdb/bolt/bolt_linux.go (+0, -10)
  10. vendor/github.com/boltdb/bolt/bolt_openbsd.go (+0, -27)
  11. vendor/github.com/boltdb/bolt/bolt_ppc.go (+0, -9)
  12. vendor/github.com/boltdb/bolt/bolt_ppc64.go (+0, -9)
  13. vendor/github.com/boltdb/bolt/bolt_ppc64le.go (+0, -12)
  14. vendor/github.com/boltdb/bolt/bolt_s390x.go (+0, -12)
  15. vendor/github.com/boltdb/bolt/bolt_unix.go (+0, -89)
  16. vendor/github.com/boltdb/bolt/bolt_unix_solaris.go (+0, -90)
  17. vendor/github.com/boltdb/bolt/bolt_windows.go (+0, -144)
  18. vendor/github.com/boltdb/bolt/boltsync_unix.go (+0, -8)
  19. vendor/github.com/boltdb/bolt/bucket.go (+0, -778)
  20. vendor/github.com/boltdb/bolt/cursor.go (+0, -400)
  21. vendor/github.com/boltdb/bolt/db.go (+0, -1036)
  22. vendor/github.com/boltdb/bolt/doc.go (+0, -44)
  23. vendor/github.com/boltdb/bolt/errors.go (+0, -71)
  24. vendor/github.com/boltdb/bolt/freelist.go (+0, -248)
  25. vendor/github.com/boltdb/bolt/node.go (+0, -604)
  26. vendor/github.com/boltdb/bolt/page.go (+0, -178)
  27. vendor/github.com/boltdb/bolt/tx.go (+0, -682)
  28. vendor/github.com/containerd/containerd/README.md (+2, -1)
  29. vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go (+2, -2)
  30. vendor/github.com/containerd/containerd/api/services/content/v1/content.proto (+1, -1)
  31. vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go (+2, -2)
  32. vendor/github.com/containerd/containerd/api/services/events/v1/events.proto (+1, -1)
  33. vendor/github.com/containerd/containerd/archive/compression/compression.go (+86, -2)
  34. vendor/github.com/containerd/containerd/cio/io.go (+9, -0)
  35. vendor/github.com/containerd/containerd/container_opts_unix.go (+0, -58)
  36. vendor/github.com/containerd/containerd/content/helpers.go (+3, -3)
  37. vendor/github.com/containerd/containerd/content/local/store.go (+3, -1)
  38. vendor/github.com/containerd/containerd/content/local/writer.go (+4, -4)
  39. vendor/github.com/containerd/containerd/content/proxy/content_writer.go (+3, -3)
  40. vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go (+1, -1)
  41. vendor/github.com/containerd/containerd/events/exchange/exchange.go (+1, -1)
  42. vendor/github.com/containerd/containerd/export.go (+58, -0)
  43. vendor/github.com/containerd/containerd/images/archive/importer.go (+254, -0)
  44. vendor/github.com/containerd/containerd/images/archive/reference.go (+86, -0)
  45. vendor/github.com/containerd/containerd/images/importexport.go (+1, -1)
  46. vendor/github.com/containerd/containerd/import.go (+104, -56)
  47. vendor/github.com/containerd/containerd/install.go (+26, -15)
  48. vendor/github.com/containerd/containerd/install_opts.go (+9, -0)
  49. vendor/github.com/containerd/containerd/metadata/bolt.go (+1, -1)
  50. vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go (+1, -1)
  51. vendor/github.com/containerd/containerd/metadata/buckets.go (+4, -4)
  52. vendor/github.com/containerd/containerd/metadata/containers.go (+1, -1)
  53. vendor/github.com/containerd/containerd/metadata/content.go (+6, -11)
  54. vendor/github.com/containerd/containerd/metadata/db.go (+2, -2)
  55. vendor/github.com/containerd/containerd/metadata/gc.go (+1, -1)
  56. vendor/github.com/containerd/containerd/metadata/images.go (+1, -1)
  57. vendor/github.com/containerd/containerd/metadata/leases.go (+1, -1)
  58. vendor/github.com/containerd/containerd/metadata/migrations.go (+13, -1)
  59. vendor/github.com/containerd/containerd/metadata/namespaces.go (+1, -1)
  60. vendor/github.com/containerd/containerd/metadata/snapshot.go (+1, -1)
  61. vendor/github.com/containerd/containerd/mount/mount_windows.go (+4, -0)
  62. vendor/github.com/containerd/containerd/oci/spec.go (+213, -6)
  63. vendor/github.com/containerd/containerd/oci/spec_opts.go (+881, -33)
  64. vendor/github.com/containerd/containerd/oci/spec_opts_unix.go (+0, -733)
  65. vendor/github.com/containerd/containerd/oci/spec_opts_windows.go (+0, -89)
  66. vendor/github.com/containerd/containerd/oci/spec_unix.go (+0, -188)
  67. vendor/github.com/containerd/containerd/platforms/defaults.go (+0, -5)
  68. vendor/github.com/containerd/containerd/platforms/defaults_unix.go (+6, -26)
  69. vendor/github.com/containerd/containerd/platforms/defaults_windows.go (+10, -10)
  70. vendor/github.com/containerd/containerd/remotes/docker/fetcher.go (+1, -1)
  71. vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go (+1, -1)
  72. vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go (+53, -2)
  73. vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go (+1, -1)
  74. vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go (+4, -4)
  75. vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go (+10, -3)
  76. vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go (+0, -1)
  77. vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go (+4, -12)
  78. vendor/github.com/containerd/containerd/runtime/v1/linux/task.go (+2, -3)
  79. vendor/github.com/containerd/containerd/runtime/v1/shim/service.go (+40, -7)
  80. vendor/github.com/containerd/containerd/services/server/server.go (+1, -1)
  81. vendor/github.com/containerd/containerd/sys/socket_unix.go (+1, -1)
  82. vendor/github.com/containerd/containerd/task.go (+4, -1)
  83. vendor/github.com/containerd/containerd/task_opts.go (+62, -0)
  84. vendor/github.com/containerd/containerd/task_opts_unix.go (+19, -10)
  85. vendor/github.com/containerd/containerd/vendor.conf (+21, -20)
  86. vendor/github.com/containerd/continuity/context.go (+657, -0)
  87. vendor/github.com/containerd/continuity/digests.go (+88, -0)
  88. vendor/github.com/containerd/continuity/driver/driver_unix.go (+0, -13)
  89. vendor/github.com/containerd/continuity/driver/lchmod_linux.go (+19, -0)
  90. vendor/github.com/containerd/continuity/driver/lchmod_unix.go (+14, -0)
  91. vendor/github.com/containerd/continuity/fs/du.go (+2, -2)
  92. vendor/github.com/containerd/continuity/fs/du_unix.go (+7, -1)
  93. vendor/github.com/containerd/continuity/fs/du_windows.go (+7, -1)
  94. vendor/github.com/containerd/continuity/groups_unix.go (+113, -0)
  95. vendor/github.com/containerd/continuity/hardlinks.go (+57, -0)
  96. vendor/github.com/containerd/continuity/hardlinks_unix.go (+36, -0)
  97. vendor/github.com/containerd/continuity/hardlinks_windows.go (+12, -0)
  98. vendor/github.com/containerd/continuity/ioutils.go (+47, -0)
  99. vendor/github.com/containerd/continuity/manifest.go (+144, -0)
  100. vendor/github.com/containerd/continuity/proto/gen.go (+3, -0)

+ 8 - 12
daemon/cluster/noderunner.go

@@ -3,7 +3,6 @@ package cluster // import "github.com/docker/docker/daemon/cluster"
 import (
 	"context"
 	"fmt"
-	"net"
 	"path/filepath"
 	"runtime"
 	"strings"
@@ -14,6 +13,7 @@ import (
 	"github.com/docker/docker/daemon/cluster/executor/container"
 	lncluster "github.com/docker/libnetwork/cluster"
 	swarmapi "github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/manager/allocator/cnmallocator"
 	swarmnode "github.com/docker/swarmkit/node"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -115,12 +115,6 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
 		joinAddr = conf.RemoteAddr
 	}
 
-	var defaultAddrPool []*net.IPNet
-	for _, address := range conf.DefaultAddressPool {
-		if _, b, err := net.ParseCIDR(address); err == nil {
-			defaultAddrPool = append(defaultAddrPool, b)
-		}
-	}
 	// Hostname is not set here. Instead, it is obtained from
 	// the node description that is reported periodically
 	swarmnodeConfig := swarmnode.Config{
@@ -128,11 +122,13 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
 		ListenControlAPI:   control,
 		ListenRemoteAPI:    conf.ListenAddr,
 		AdvertiseRemoteAPI: conf.AdvertiseAddr,
-		DefaultAddrPool:    defaultAddrPool,
-		SubnetSize:         int(conf.SubnetSize),
-		JoinAddr:           joinAddr,
-		StateDir:           n.cluster.root,
-		JoinToken:          conf.joinToken,
+		NetworkConfig: &cnmallocator.NetworkConfig{
+			DefaultAddrPool: conf.DefaultAddressPool,
+			SubnetSize:      conf.SubnetSize,
+		},
+		JoinAddr:  joinAddr,
+		StateDir:  n.cluster.root,
+		JoinToken: conf.joinToken,
 		Executor: container.NewExecutor(
 			n.cluster.config.Backend,
 			n.cluster.config.PluginBackend,
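
The deleted net.ParseCIDR loop above is not simply dropped: the daemon now hands the raw CIDR strings to swarmkit via cnmallocator.NetworkConfig, and the equivalent parsing is assumed to happen on the swarmkit side. A minimal sketch mirroring the removed conversion (hypothetical helper, not part of this commit):

```go
package main

import (
	"fmt"
	"net"
)

// parseAddrPool mirrors the loop removed from noderunner.go: invalid
// CIDR strings are silently skipped, valid ones become *net.IPNet.
func parseAddrPool(pool []string) []*net.IPNet {
	var out []*net.IPNet
	for _, cidr := range pool {
		if _, ipnet, err := net.ParseCIDR(cidr); err == nil {
			out = append(out, ipnet)
		}
	}
	return out
}

func main() {
	fmt.Println(parseAddrPool([]string{"20.20.0.0/16", "not-a-cidr"}))
}
```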

+ 6 - 6
vendor.conf

@@ -47,7 +47,7 @@ github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
 github.com/hashicorp/go-sockaddr 6d291a969b86c4b633730bfc6b8b9d64c3aafed9
 github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
 github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
-github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
+github.com/docker/libkv 458977154600b9f23984d9f4b82e79570b5ae12b
 github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
 github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
 
@@ -59,7 +59,6 @@ github.com/coreos/etcd v3.2.1
 github.com/coreos/go-semver v0.2.0
 github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
 github.com/hashicorp/consul v0.5.2
-github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
 github.com/miekg/dns v1.0.7
 github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
 go.etcd.io/bbolt v1.3.1-etcd.8
@@ -115,18 +114,19 @@ github.com/googleapis/gax-go v2.0.0
 google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
 
 # containerd
-github.com/containerd/containerd v1.2.0-beta.2
+github.com/containerd/containerd v1.2.0-rc.0
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
-github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
+github.com/containerd/continuity f44b615e492bdfb371aae2f76ec694d9da1db537
 github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
-github.com/containerd/go-runc edcf3de1f4971445c42d61f20d506b30612aa031
+github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
 github.com/containerd/ttrpc 94dde388801693c54f88a6596f713b51a8b30b2d
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
+github.com/containerd/cri 9f39e3289533fc228c5e5fcac0a6dbdd60c6047b # release/1.2 branch
 
 # cluster
-github.com/docker/swarmkit cfa742c8abe6f8e922f6e4e920153c408e7d9c3b
+github.com/docker/swarmkit 3044c576a8a970d3079492b585054f29e96e27f1
 github.com/gogo/protobuf v1.0.0
 github.com/cloudflare/cfssl 1.3.2
 github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
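
Note that go.etcd.io/bbolt v1.3.1-etcd.8 is already pinned in the unchanged part of vendor.conf, so this hunk only drops the stale github.com/boltdb/bolt pin and bumps the moving dependencies. Before deleting a pin it is worth confirming that nothing outside vendor/ still imports the package; a hypothetical checker, shown only to illustrate the invariant (not part of this commit):

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	const removed = "github.com/boltdb/bolt"
	// Walk the repo and flag any .go file outside vendor/ that still
	// imports the package being dropped from vendor.conf.
	filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() || !strings.HasSuffix(path, ".go") ||
			strings.HasPrefix(path, "vendor"+string(filepath.Separator)) {
			return nil
		}
		f, perr := parser.ParseFile(token.NewFileSet(), path, nil, parser.ImportsOnly)
		if perr != nil {
			return nil // skip files that do not parse
		}
		for _, imp := range f.Imports {
			if strings.Contains(imp.Path.Value, removed) {
				fmt.Printf("%s still imports %s\n", path, removed)
			}
		}
		return nil
	})
}
```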

+ 0 - 20
vendor/github.com/boltdb/bolt/LICENSE

@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Ben Johnson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 0 - 857
vendor/github.com/boltdb/bolt/README.md

@@ -1,857 +0,0 @@
-Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
-====
-
-Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
-[LMDB project][lmdb]. The goal of the project is to provide a simple,
-fast, and reliable database for projects that don't require a full database
-server such as Postgres or MySQL.
-
-Since Bolt is meant to be used as such a low-level piece of functionality,
-simplicity is key. The API will be small and only focus on getting values
-and setting values. That's it.
-
-[hyc_symas]: https://twitter.com/hyc_symas
-[lmdb]: http://symas.com/mdb/
-
-## Project Status
-
-Bolt is stable, the API is fixed, and the file format is fixed. Full unit
-test coverage and randomized black box testing are used to ensure database
-consistency and thread safety. Bolt is currently in high-load production
-environments serving databases as large as 1TB. Many companies such as
-Shopify and Heroku use Bolt-backed services every day.
-
-## Table of Contents
-
-- [Getting Started](#getting-started)
-  - [Installing](#installing)
-  - [Opening a database](#opening-a-database)
-  - [Transactions](#transactions)
-    - [Read-write transactions](#read-write-transactions)
-    - [Read-only transactions](#read-only-transactions)
-    - [Batch read-write transactions](#batch-read-write-transactions)
-    - [Managing transactions manually](#managing-transactions-manually)
-  - [Using buckets](#using-buckets)
-  - [Using key/value pairs](#using-keyvalue-pairs)
-  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
-  - [Iterating over keys](#iterating-over-keys)
-    - [Prefix scans](#prefix-scans)
-    - [Range scans](#range-scans)
-    - [ForEach()](#foreach)
-  - [Nested buckets](#nested-buckets)
-  - [Database backups](#database-backups)
-  - [Statistics](#statistics)
-  - [Read-Only Mode](#read-only-mode)
-  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
-- [Resources](#resources)
-- [Comparison with other databases](#comparison-with-other-databases)
-  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
-  - [LevelDB, RocksDB](#leveldb-rocksdb)
-  - [LMDB](#lmdb)
-- [Caveats & Limitations](#caveats--limitations)
-- [Reading the Source](#reading-the-source)
-- [Other Projects Using Bolt](#other-projects-using-bolt)
-
-## Getting Started
-
-### Installing
-
-To start using Bolt, install Go and run `go get`:
-
-```sh
-$ go get github.com/boltdb/bolt/...
-```
-
-This will retrieve the library and install the `bolt` command line utility into
-your `$GOBIN` path.
-
-
-### Opening a database
-
-The top-level object in Bolt is a `DB`. It is represented as a single file on
-your disk and represents a consistent snapshot of your data.
-
-To open your database, simply use the `bolt.Open()` function:
-
-```go
-package main
-
-import (
-	"log"
-
-	"github.com/boltdb/bolt"
-)
-
-func main() {
-	// Open the my.db data file in your current directory.
-	// It will be created if it doesn't exist.
-	db, err := bolt.Open("my.db", 0600, nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer db.Close()
-
-	...
-}
-```
-
-Please note that Bolt obtains a file lock on the data file so multiple processes
-cannot open the same database at the same time. Opening an already open Bolt
-database will cause it to hang until the other process closes it. To prevent
-an indefinite wait you can pass a timeout option to the `Open()` function:
-
-```go
-db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
-```
-
-
-### Transactions
-
-Bolt allows only one read-write transaction at a time but allows as many
-read-only transactions as you want at a time. Each transaction has a consistent
-view of the data as it existed when the transaction started.
-
-Individual transactions and all objects created from them (e.g. buckets, keys)
-are not thread safe. To work with data in multiple goroutines you must start
-a transaction for each one or use locking to ensure only one goroutine accesses
-a transaction at a time. Creating a transaction from the `DB` is thread safe.
-
-Read-only transactions and read-write transactions should not depend on one
-another and generally shouldn't be opened simultaneously in the same goroutine.
-This can cause a deadlock as the read-write transaction needs to periodically
-re-map the data file but it cannot do so while a read-only transaction is open.
-
-
-#### Read-write transactions
-
-To start a read-write transaction, you can use the `DB.Update()` function:
-
-```go
-err := db.Update(func(tx *bolt.Tx) error {
-	...
-	return nil
-})
-```
-
-Inside the closure, you have a consistent view of the database. You commit the
-transaction by returning `nil` at the end. You can also rollback the transaction
-at any point by returning an error. All database operations are allowed inside
-a read-write transaction.
-
-Always check the return error as it will report any disk failures that can cause
-your transaction to not complete. If you return an error within your closure
-it will be passed through.
-
-
-#### Read-only transactions
-
-To start a read-only transaction, you can use the `DB.View()` function:
-
-```go
-err := db.View(func(tx *bolt.Tx) error {
-	...
-	return nil
-})
-```
-
-You also get a consistent view of the database within this closure, however,
-no mutating operations are allowed within a read-only transaction. You can only
-retrieve buckets, retrieve values, and copy the database within a read-only
-transaction.
-
-
-#### Batch read-write transactions
-
-Each `DB.Update()` waits for disk to commit the writes. This overhead
-can be minimized by combining multiple updates with the `DB.Batch()`
-function:
-
-```go
-err := db.Batch(func(tx *bolt.Tx) error {
-	...
-	return nil
-})
-```
-
-Concurrent Batch calls are opportunistically combined into larger
-transactions. Batch is only useful when there are multiple goroutines
-calling it.
-
-The trade-off is that `Batch` can call the given
-function multiple times, if parts of the transaction fail. The
-function must be idempotent and side effects must take effect only
-after a successful return from `DB.Batch()`.
-
-For example: don't display messages from inside the function, instead
-set variables in the enclosing scope:
-
-```go
-var id uint64
-err := db.Batch(func(tx *bolt.Tx) error {
-	// Find last key in bucket, decode as bigendian uint64, increment
-	// by one, encode back to []byte, and add new key.
-	...
-	id = newValue
-	return nil
-})
-if err != nil {
-	return ...
-}
-fmt.Println("Allocated ID %d", id)
-```
-
-
-#### Managing transactions manually
-
-The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
-function. These helper functions will start the transaction, execute a function,
-and then safely close your transaction if an error is returned. This is the
-recommended way to use Bolt transactions.
-
-However, sometimes you may want to manually start and end your transactions.
-You can use the `DB.Begin()` function directly but **please** be sure to close
-the transaction.
-
-```go
-// Start a writable transaction.
-tx, err := db.Begin(true)
-if err != nil {
-    return err
-}
-defer tx.Rollback()
-
-// Use the transaction...
-_, err := tx.CreateBucket([]byte("MyBucket"))
-if err != nil {
-    return err
-}
-
-// Commit the transaction and check for error.
-if err := tx.Commit(); err != nil {
-    return err
-}
-```
-
-The first argument to `DB.Begin()` is a boolean stating if the transaction
-should be writable.
-
-
-### Using buckets
-
-Buckets are collections of key/value pairs within the database. All keys in a
-bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
-function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
-	b, err := tx.CreateBucket([]byte("MyBucket"))
-	if err != nil {
-		return fmt.Errorf("create bucket: %s", err)
-	}
-	return nil
-})
-```
-
-You can also create a bucket only if it doesn't exist by using the
-`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
-function for all your top-level buckets after you open your database so you can
-guarantee that they exist for future transactions.
-
-To delete a bucket, simply call the `Tx.DeleteBucket()` function.
-
-
-### Using key/value pairs
-
-To save a key/value pair to a bucket, use the `Bucket.Put()` function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
-	b := tx.Bucket([]byte("MyBucket"))
-	err := b.Put([]byte("answer"), []byte("42"))
-	return err
-})
-```
-
-This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
-bucket. To retrieve this value, we can use the `Bucket.Get()` function:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	b := tx.Bucket([]byte("MyBucket"))
-	v := b.Get([]byte("answer"))
-	fmt.Printf("The answer is: %s\n", v)
-	return nil
-})
-```
-
-The `Get()` function does not return an error because its operation is
-guaranteed to work (unless there is some kind of system failure). If the key
-exists then it will return its byte slice value. If it doesn't exist then it
-will return `nil`. It's important to note that you can have a zero-length value
-set to a key which is different than the key not existing.
-
-Use the `Bucket.Delete()` function to delete a key from the bucket.
-
-Please note that values returned from `Get()` are only valid while the
-transaction is open. If you need to use a value outside of the transaction
-then you must use `copy()` to copy it to another byte slice.
-
-
-### Autoincrementing integer for the bucket
-By using the `NextSequence()` function, you can let Bolt determine a sequence
-which can be used as the unique identifier for your key/value pairs. See the
-example below.
-
-```go
-// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
-func (s *Store) CreateUser(u *User) error {
-    return s.db.Update(func(tx *bolt.Tx) error {
-        // Retrieve the users bucket.
-        // This should be created when the DB is first opened.
-        b := tx.Bucket([]byte("users"))
-
-        // Generate ID for the user.
-        // This returns an error only if the Tx is closed or not writeable.
-        // That can't happen in an Update() call so I ignore the error check.
-        id, _ := b.NextSequence()
-        u.ID = int(id)
-
-        // Marshal user data into bytes.
-        buf, err := json.Marshal(u)
-        if err != nil {
-            return err
-        }
-
-        // Persist bytes to users bucket.
-        return b.Put(itob(u.ID), buf)
-    })
-}
-
-// itob returns an 8-byte big endian representation of v.
-func itob(v int) []byte {
-    b := make([]byte, 8)
-    binary.BigEndian.PutUint64(b, uint64(v))
-    return b
-}
-
-type User struct {
-    ID int
-    ...
-}
-```
-
-### Iterating over keys
-
-Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
-iteration over these keys extremely fast. To iterate over keys we'll use a
-`Cursor`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	// Assume bucket exists and has keys
-	b := tx.Bucket([]byte("MyBucket"))
-
-	c := b.Cursor()
-
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		fmt.Printf("key=%s, value=%s\n", k, v)
-	}
-
-	return nil
-})
-```
-
-The cursor allows you to move to a specific point in the list of keys and move
-forward or backward through the keys one at a time.
-
-The following functions are available on the cursor:
-
-```
-First()  Move to the first key.
-Last()   Move to the last key.
-Seek()   Move to a specific key.
-Next()   Move to the next key.
-Prev()   Move to the previous key.
-```
-
-Each of those functions has a return signature of `(key []byte, value []byte)`.
-When you have iterated to the end of the cursor then `Next()` will return a
-`nil` key.  You must seek to a position using `First()`, `Last()`, or `Seek()`
-before calling `Next()` or `Prev()`. If you do not seek to a position then
-these functions will return a `nil` key.
-
-During iteration, if the key is non-`nil` but the value is `nil`, that means
-the key refers to a bucket rather than a value.  Use `Bucket.Bucket()` to
-access the sub-bucket.
-
-
-#### Prefix scans
-
-To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	// Assume bucket exists and has keys
-	c := tx.Bucket([]byte("MyBucket")).Cursor()
-
-	prefix := []byte("1234")
-	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
-		fmt.Printf("key=%s, value=%s\n", k, v)
-	}
-
-	return nil
-})
-```
-
-#### Range scans
-
-Another common use case is scanning over a range such as a time range. If you
-use a sortable time encoding such as RFC3339 then you can query a specific
-date range like this:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	// Assume our events bucket exists and has RFC3339 encoded time keys.
-	c := tx.Bucket([]byte("Events")).Cursor()
-
-	// Our time range spans the 90's decade.
-	min := []byte("1990-01-01T00:00:00Z")
-	max := []byte("2000-01-01T00:00:00Z")
-
-	// Iterate over the 90's.
-	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
-		fmt.Printf("%s: %s\n", k, v)
-	}
-
-	return nil
-})
-```
-
-Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
-
-
-#### ForEach()
-
-You can also use the function `ForEach()` if you know you'll be iterating over
-all the keys in a bucket:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	// Assume bucket exists and has keys
-	b := tx.Bucket([]byte("MyBucket"))
-
-	b.ForEach(func(k, v []byte) error {
-		fmt.Printf("key=%s, value=%s\n", k, v)
-		return nil
-	})
-	return nil
-})
-```
-
-Please note that keys and values in `ForEach()` are only valid while
-the transaction is open. If you need to use a key or value outside of
-the transaction, you must use `copy()` to copy it to another byte
-slice.
-
-### Nested buckets
-
-You can also store a bucket in a key to create nested buckets. The API is the
-same as the bucket management API on the `DB` object:
-
-```go
-func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
-func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
-func (*Bucket) DeleteBucket(key []byte) error
-```
-
-
-### Database backups
-
-Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
-function to write a consistent view of the database to a writer. If you call
-this from a read-only transaction, it will perform a hot backup and not block
-your other database reads and writes.
-
-By default, it will use a regular file handle which will utilize the operating
-system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
-documentation for information about optimizing for larger-than-RAM datasets.
-
-One common use case is to backup over HTTP so you can use tools like `cURL` to
-do database backups:
-
-```go
-func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
-	err := db.View(func(tx *bolt.Tx) error {
-		w.Header().Set("Content-Type", "application/octet-stream")
-		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
-		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
-		_, err := tx.WriteTo(w)
-		return err
-	})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-	}
-}
-```
-
-Then you can backup using this command:
-
-```sh
-$ curl http://localhost/backup > my.db
-```
-
-Or you can open your browser to `http://localhost/backup` and it will download
-automatically.
-
-If you want to backup to another file you can use the `Tx.CopyFile()` helper
-function.
-
-
-### Statistics
-
-The database keeps a running count of many of the internal operations it
-performs so you can better understand what's going on. By grabbing a snapshot
-of these stats at two points in time we can see what operations were performed
-in that time range.
-
-For example, we could start a goroutine to log stats every 10 seconds:
-
-```go
-go func() {
-	// Grab the initial stats.
-	prev := db.Stats()
-
-	for {
-		// Wait for 10s.
-		time.Sleep(10 * time.Second)
-
-		// Grab the current stats and diff them.
-		stats := db.Stats()
-		diff := stats.Sub(&prev)
-
-		// Encode stats to JSON and print to STDERR.
-		json.NewEncoder(os.Stderr).Encode(diff)
-
-		// Save stats for the next loop.
-		prev = stats
-	}
-}()
-```
-
-It's also useful to pipe these stats to a service such as statsd for monitoring
-or to provide an HTTP endpoint that will perform a fixed-length sample.
-
-
-### Read-Only Mode
-
-Sometimes it is useful to create a shared, read-only Bolt database. To do this,
-set the `Options.ReadOnly` flag when opening your database. Read-only mode
-uses a shared lock to allow multiple processes to read from the database but
-it will block any processes from opening the database in read-write mode.
-
-```go
-db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
-if err != nil {
-	log.Fatal(err)
-}
-```
-
-### Mobile Use (iOS/Android)
-
-Bolt is able to run on mobile devices by leveraging the binding feature of the
-[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
-contain your database logic and a reference to a `*bolt.DB` with an initializing
-constructor that takes in a filepath where the database file will be stored.
-Neither Android nor iOS require extra permissions or cleanup from using this method.
-
-```go
-func NewBoltDB(filepath string) *BoltDB {
-	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return &BoltDB{db}
-}
-
-type BoltDB struct {
-	db *bolt.DB
-	...
-}
-
-func (b *BoltDB) Path() string {
-	return b.db.Path()
-}
-
-func (b *BoltDB) Close() {
-	b.db.Close()
-}
-```
-
-Database logic should be defined as methods on this wrapper struct.
-
-To initialize this struct from the native language (both platforms now sync
-their local storage to the cloud; these snippets disable that functionality for the
-database file):
-
-#### Android
-
-```java
-String path;
-if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
-    path = getNoBackupFilesDir().getAbsolutePath();
-} else{
-    path = getFilesDir().getAbsolutePath();
-}
-Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
-```
-
-#### iOS
-
-```objc
-- (void)demo {
-    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
-                                                          NSUserDomainMask,
-                                                          YES) objectAtIndex:0];
-	GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
-	[self addSkipBackupAttributeToItemAtPath:demo.path];
-	//Some DB Logic would go here
-	[demo close];
-}
-
-- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
-{
-    NSURL* URL= [NSURL fileURLWithPath: filePathString];
-    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
-
-    NSError *error = nil;
-    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
-                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
-    if(!success){
-        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
-    }
-    return success;
-}
-
-```
-
-## Resources
-
-For more information on getting started with Bolt, check out the following articles:
-
-* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
-* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
-
-
-## Comparison with other databases
-
-### Postgres, MySQL, & other relational databases
-
-Relational databases structure data into rows and are only accessible through
-the use of SQL. This approach provides flexibility in how you store and query
-your data but also incurs overhead in parsing and planning SQL statements. Bolt
-accesses all data by a byte slice key. This makes Bolt fast to read and write
-data by key but provides no built-in support for joining values together.
-
-Most relational databases (with the exception of SQLite) are standalone servers
-that run separately from your application. This gives your systems
-flexibility to connect multiple application servers to a single database
-server but also adds overhead in serializing and transporting data over the
-network. Bolt runs as a library included in your application so all data access
-has to go through your application's process. This brings data closer to your
-application but limits multi-process access to the data.
-
-
-### LevelDB, RocksDB
-
-LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
-they are libraries bundled into the application, however, their underlying
-structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
-random writes by using a write ahead log and multi-tiered, sorted files called
-SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
-have trade-offs.
-
-If you require a high random write throughput (>10,000 w/sec) or you need to use
-spinning disks then LevelDB could be a good choice. If your application is
-read-heavy or does a lot of range scans then Bolt could be a good choice.
-
-One other important consideration is that LevelDB does not have transactions.
-It supports batch writing of key/values pairs and it supports read snapshots
-but it will not give you the ability to do a compare-and-swap operation safely.
-Bolt supports fully serializable ACID transactions.
-
-
-### LMDB
-
-Bolt was originally a port of LMDB so it is architecturally similar. Both use
-a B+tree, have ACID semantics with fully serializable transactions, and support
-lock-free MVCC using a single writer and multiple readers.
-
-The two projects have somewhat diverged. LMDB heavily focuses on raw performance
-while Bolt has focused on simplicity and ease of use. For example, LMDB allows
-several unsafe actions such as direct writes for the sake of performance. Bolt
-opts to disallow actions which can leave the database in a corrupted state. The
-only exception to this in Bolt is `DB.NoSync`.
-
-There are also a few differences in API. LMDB requires a maximum mmap size when
-opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
-automatically. LMDB overloads the getter and setter functions with multiple
-flags whereas Bolt splits these specialized cases into their own functions.
-
-
-## Caveats & Limitations
-
-It's important to pick the right tool for the job and Bolt is no exception.
-Here are a few things to note when evaluating and using Bolt:
-
-* Bolt is good for read intensive workloads. Sequential write performance is
-  also fast but random writes can be slow. You can use `DB.Batch()` or add a
-  write-ahead log to help mitigate this issue.
-
-* Bolt uses a B+tree internally so there can be a lot of random page access.
-  SSDs provide a significant performance boost over spinning disks.
-
-* Try to avoid long running read transactions. Bolt uses copy-on-write so
-  old pages cannot be reclaimed while an old transaction is using them.
-
-* Byte slices returned from Bolt are only valid during a transaction. Once the
-  transaction has been committed or rolled back then the memory they point to
-  can be reused by a new page or can be unmapped from virtual memory and you'll
-  see an `unexpected fault address` panic when accessing it.
-
-* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
-  buckets that have random inserts will cause your database to have very poor
-  page utilization.
-
-* Use larger buckets in general. Smaller buckets cause poor page utilization
-  once they become larger than the page size (typically 4KB).
-
-* Bulk loading a lot of random writes into a new bucket can be slow as the
-  page will not split until the transaction is committed. Randomly inserting
-  more than 100,000 key/value pairs into a single new bucket in a single
-  transaction is not advised.
-
-* Bolt uses a memory-mapped file so the underlying operating system handles the
-  caching of the data. Typically, the OS will cache as much of the file as it
-  can in memory and will release memory as needed to other processes. This means
-  that Bolt can show very high memory usage when working with large databases.
-  However, this is expected and the OS will release memory as needed. Bolt can
-  handle databases much larger than the available physical RAM, provided its
-  memory-map fits in the process virtual address space. It may be problematic
-  on 32-bit systems.
-
-* The data structures in the Bolt database are memory mapped so the data file
-  will be endian specific. This means that you cannot copy a Bolt file from a
-  little endian machine to a big endian machine and have it work. For most
-  users this is not a concern since most modern CPUs are little endian.
-
-* Because of the way pages are laid out on disk, Bolt cannot truncate data files
-  and return free pages back to the disk. Instead, Bolt maintains a free list
-  of unused pages within its data file. These free pages can be reused by later
-  transactions. This works well for many use cases as databases generally tend
-  to grow. However, it's important to note that deleting large chunks of data
-  will not allow you to reclaim that space on disk.
-
-  For more information on page allocation, [see this comment][page-allocation].
-
-[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
-
-
-## Reading the Source
-
-Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
-transactional key/value database so it can be a good starting point for people
-interested in how databases work.
-
-The best places to start are the main entry points into Bolt:
-
-- `Open()` - Initializes the reference to the database. It's responsible for
-  creating the database if it doesn't exist, obtaining an exclusive lock on the
-  file, reading the meta pages, & memory-mapping the file.
-
-- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
-  value of the `writable` argument. This requires briefly obtaining the "meta"
-  lock to keep track of open transactions. Only one read-write transaction can
-  exist at a time so the "rwlock" is acquired during the life of a read-write
-  transaction.
-
-- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
-  arguments, a cursor is used to traverse the B+tree to the page and position
-  where the key & value will be written. Once the position is found, the bucket
-  materializes the underlying page and the page's parent pages into memory as
-  "nodes". These nodes are where mutations occur during read-write transactions.
-  These changes get flushed to disk during commit.
-
-- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
-  to move to the page & position of a key/value pair. During a read-only
-  transaction, the key and value data is returned as a direct reference to the
-  underlying mmap file so there's no allocation overhead. For read-write
-  transactions, this data may reference the mmap file or one of the in-memory
-  node values.
-
-- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
-  or in-memory nodes. It can seek to a specific key, move to the first or last
-  value, or it can move forward or backward. The cursor handles the movement up
-  and down the B+tree transparently to the end user.
-
-- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
-  into pages to be written to disk. Writing to disk then occurs in two phases.
-  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
-  new meta page with an incremented transaction ID is written and another
-  `fsync()` occurs. This two phase write ensures that partially written data
-  pages are ignored in the event of a crash since the meta page pointing to them
-  is never written. Partially written meta pages are invalidated because they
-  are written with a checksum.
-
-If you have additional notes that could be helpful for others, please submit
-them via pull request.
-
-
-## Other Projects Using Bolt
-
-Below is a list of public, open source projects that use Bolt:
-
-* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
-* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
-* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
-* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
-* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
-* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
-* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
-* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
-* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
-* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
-* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
-* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
-* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
-* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
-* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
-* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
-* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
-* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
-* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
-* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
-* [stow](https://github.com/djherbis/stow) -  a persistence manager for objects
-  backed by boltdb.
-* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
-  simple tx and key scans.
-* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
-* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
-* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
-* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
-* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
-* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
-* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
-* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
-* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
-* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
-* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
-
-If you are using Bolt in a project please send a pull request to add it to the list.

+ 0 - 10
vendor/github.com/boltdb/bolt/bolt_386.go

@@ -1,10 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false

+ 0 - 10
vendor/github.com/boltdb/bolt/bolt_amd64.go

@@ -1,10 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false

+ 0 - 28
vendor/github.com/boltdb/bolt/bolt_arm.go

@@ -1,28 +0,0 @@
-package bolt
-
-import "unsafe"
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned bool
-
-func init() {
-	// Simple check to see whether this arch handles unaligned load/stores
-	// correctly.
-
-	// ARM9 and older devices require load/stores to be from/to aligned
-	// addresses. If not, the lower 2 bits are cleared and that address is
-	// read in a jumbled up order.
-
-	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
-
-	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
-	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
-
-	brokenUnaligned = val != 0x11222211
-}

+ 0 - 12
vendor/github.com/boltdb/bolt/bolt_arm64.go

@@ -1,12 +0,0 @@
-// +build arm64
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false

+ 0 - 10
vendor/github.com/boltdb/bolt/bolt_linux.go

@@ -1,10 +0,0 @@
-package bolt
-
-import (
-	"syscall"
-)
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
-	return syscall.Fdatasync(int(db.file.Fd()))
-}

+ 0 - 27
vendor/github.com/boltdb/bolt/bolt_openbsd.go

@@ -1,27 +0,0 @@
-package bolt
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-const (
-	msAsync      = 1 << iota // perform asynchronous writes
-	msSync                   // perform synchronous writes
-	msInvalidate             // invalidate cached data
-)
-
-func msync(db *DB) error {
-	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
-	if errno != 0 {
-		return errno
-	}
-	return nil
-}
-
-func fdatasync(db *DB) error {
-	if db.data != nil {
-		return msync(db)
-	}
-	return db.file.Sync()
-}

+ 0 - 9
vendor/github.com/boltdb/bolt/bolt_ppc.go

@@ -1,9 +0,0 @@
-// +build ppc
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF

+ 0 - 9
vendor/github.com/boltdb/bolt/bolt_ppc64.go

@@ -1,9 +0,0 @@
-// +build ppc64
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF

+ 0 - 12
vendor/github.com/boltdb/bolt/bolt_ppc64le.go

@@ -1,12 +0,0 @@
-// +build ppc64le
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false

+ 0 - 12
vendor/github.com/boltdb/bolt/bolt_s390x.go

@@ -1,12 +0,0 @@
-// +build s390x
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false

+ 0 - 89
vendor/github.com/boltdb/bolt/bolt_unix.go

@@ -1,89 +0,0 @@
-// +build !windows,!plan9,!solaris
-
-package bolt
-
-import (
-	"fmt"
-	"os"
-	"syscall"
-	"time"
-	"unsafe"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
-	var t time.Time
-	for {
-		// If we're beyond our timeout then return an error.
-		// This can only occur after we've attempted a flock once.
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-		flag := syscall.LOCK_SH
-		if exclusive {
-			flag = syscall.LOCK_EX
-		}
-
-		// Otherwise attempt to obtain an exclusive lock.
-		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
-		if err == nil {
-			return nil
-		} else if err != syscall.EWOULDBLOCK {
-			return err
-		}
-
-		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
-	}
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
-	return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
-	// Map the data file to memory.
-	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
-	if err != nil {
-		return err
-	}
-
-	// Advise the kernel that the mmap is accessed randomly.
-	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
-		return fmt.Errorf("madvise: %s", err)
-	}
-
-	// Save the original byte slice and convert to a byte array pointer.
-	db.dataref = b
-	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
-	db.datasz = sz
-	return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
-	// Ignore the unmap if we have no mapped data.
-	if db.dataref == nil {
-		return nil
-	}
-
-	// Unmap using the original byte slice.
-	err := syscall.Munmap(db.dataref)
-	db.dataref = nil
-	db.data = nil
-	db.datasz = 0
-	return err
-}
-
-// NOTE: This function is copied from stdlib because it is not available on darwin.
-func madvise(b []byte, advice int) (err error) {
-	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}

+ 0 - 90
vendor/github.com/boltdb/bolt/bolt_unix_solaris.go

@@ -1,90 +0,0 @@
-package bolt
-
-import (
-	"fmt"
-	"os"
-	"syscall"
-	"time"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
-	var t time.Time
-	for {
-		// If we're beyond our timeout then return an error.
-		// This can only occur after we've attempted a flock once.
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-		var lock syscall.Flock_t
-		lock.Start = 0
-		lock.Len = 0
-		lock.Pid = 0
-		lock.Whence = 0
-		lock.Pid = 0
-		if exclusive {
-			lock.Type = syscall.F_WRLCK
-		} else {
-			lock.Type = syscall.F_RDLCK
-		}
-		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
-		if err == nil {
-			return nil
-		} else if err != syscall.EAGAIN {
-			return err
-		}
-
-		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
-	}
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
-	var lock syscall.Flock_t
-	lock.Start = 0
-	lock.Len = 0
-	lock.Type = syscall.F_UNLCK
-	lock.Whence = 0
-	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
-	// Map the data file to memory.
-	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
-	if err != nil {
-		return err
-	}
-
-	// Advise the kernel that the mmap is accessed randomly.
-	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
-		return fmt.Errorf("madvise: %s", err)
-	}
-
-	// Save the original byte slice and convert to a byte array pointer.
-	db.dataref = b
-	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
-	db.datasz = sz
-	return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
-	// Ignore the unmap if we have no mapped data.
-	if db.dataref == nil {
-		return nil
-	}
-
-	// Unmap using the original byte slice.
-	err := unix.Munmap(db.dataref)
-	db.dataref = nil
-	db.data = nil
-	db.datasz = 0
-	return err
-}

+ 0 - 144
vendor/github.com/boltdb/bolt/bolt_windows.go

@@ -1,144 +0,0 @@
-package bolt
-
-import (
-	"fmt"
-	"os"
-	"syscall"
-	"time"
-	"unsafe"
-)
-
-// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
-var (
-	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
-	procLockFileEx   = modkernel32.NewProc("LockFileEx")
-	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
-)
-
-const (
-	lockExt = ".lock"
-
-	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
-	flagLockExclusive       = 2
-	flagLockFailImmediately = 1
-
-	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
-	errLockViolation syscall.Errno = 0x21
-)
-
-func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
-	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
-	if r == 0 {
-		return err
-	}
-	return nil
-}
-
-func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
-	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
-	if r == 0 {
-		return err
-	}
-	return nil
-}
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
-	return db.file.Sync()
-}
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
-	// Create a separate lock file on windows because a process
-	// cannot share an exclusive lock on the same file. This is
-	// needed during Tx.WriteTo().
-	f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
-	if err != nil {
-		return err
-	}
-	db.lockfile = f
-
-	var t time.Time
-	for {
-		// If we're beyond our timeout then return an error.
-		// This can only occur after we've attempted a flock once.
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-
-		var flag uint32 = flagLockFailImmediately
-		if exclusive {
-			flag |= flagLockExclusive
-		}
-
-		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
-		if err == nil {
-			return nil
-		} else if err != errLockViolation {
-			return err
-		}
-
-		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
-	}
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
-	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
-	db.lockfile.Close()
-	os.Remove(db.path+lockExt)
-	return err
-}
-
-// mmap memory maps a DB's data file.
-// Based on: https://github.com/edsrzf/mmap-go
-func mmap(db *DB, sz int) error {
-	if !db.readOnly {
-		// Truncate the database to the size of the mmap.
-		if err := db.file.Truncate(int64(sz)); err != nil {
-			return fmt.Errorf("truncate: %s", err)
-		}
-	}
-
-	// Open a file mapping handle. Pass the high and low 32 bits of the
-	// maximum size separately, as CreateFileMapping expects.
-	sizehi := uint32(sz >> 32)
-	sizelo := uint32(sz) & 0xffffffff
-	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
-	if h == 0 {
-		return os.NewSyscallError("CreateFileMapping", errno)
-	}
-
-	// Create the memory map.
-	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
-	if addr == 0 {
-		return os.NewSyscallError("MapViewOfFile", errno)
-	}
-
-	// Close mapping handle.
-	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
-		return os.NewSyscallError("CloseHandle", err)
-	}
-
-	// Convert to a byte array.
-	db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
-	db.datasz = sz
-
-	return nil
-}
-
-// munmap unmaps a pointer from a file.
-// Based on: https://github.com/edsrzf/mmap-go
-func munmap(db *DB) error {
-	if db.data == nil {
-		return nil
-	}
-
-	addr := (uintptr)(unsafe.Pointer(&db.data[0]))
-	if err := syscall.UnmapViewOfFile(addr); err != nil {
-		return os.NewSyscallError("UnmapViewOfFile", err)
-	}
-	return nil
-}
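
The hand-rolled NewLazyDLL/NewProc plumbing above predates wrappers that golang.org/x/sys/windows now exports. A minimal, Windows-only sketch of the same exclusive lock via those wrappers; illustrative only, and the path is hypothetical.

```go
package main

import (
	"log"
	"os"

	"golang.org/x/sys/windows"
)

func main() {
	f, err := os.OpenFile(`C:\temp\example.lock`, os.O_CREATE|os.O_RDWR, 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := windows.Handle(f.Fd())
	ol := new(windows.Overlapped)

	// Try to lock one byte exclusively without blocking, as flock above does.
	flags := uint32(windows.LOCKFILE_EXCLUSIVE_LOCK | windows.LOCKFILE_FAIL_IMMEDIATELY)
	if err := windows.LockFileEx(h, flags, 0, 1, 0, ol); err != nil {
		log.Fatalf("lock is held by another process: %v", err)
	}

	// ... exclusive access ...

	if err := windows.UnlockFileEx(h, 0, 1, 0, ol); err != nil {
		log.Fatal(err)
	}
}
```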

+ 0 - 8
vendor/github.com/boltdb/bolt/boltsync_unix.go

@@ -1,8 +0,0 @@
-// +build !windows,!plan9,!linux,!openbsd
-
-package bolt
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
-	return db.file.Sync()
-}

+ 0 - 778
vendor/github.com/boltdb/bolt/bucket.go

@@ -1,778 +0,0 @@
-package bolt
-
-import (
-	"bytes"
-	"fmt"
-	"unsafe"
-)
-
-const (
-	// MaxKeySize is the maximum length of a key, in bytes.
-	MaxKeySize = 32768
-
-	// MaxValueSize is the maximum length of a value, in bytes.
-	MaxValueSize = (1 << 31) - 2
-)
-
-const (
-	maxUint = ^uint(0)
-	minUint = 0
-	maxInt  = int(^uint(0) >> 1)
-	minInt  = -maxInt - 1
-)
-
-const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
-
-const (
-	minFillPercent = 0.1
-	maxFillPercent = 1.0
-)
-
-// DefaultFillPercent is the percentage that split pages are filled.
-// This value can be changed by setting Bucket.FillPercent.
-const DefaultFillPercent = 0.5
-
-// Bucket represents a collection of key/value pairs inside the database.
-type Bucket struct {
-	*bucket
-	tx       *Tx                // the associated transaction
-	buckets  map[string]*Bucket // subbucket cache
-	page     *page              // inline page reference
-	rootNode *node              // materialized node for the root page.
-	nodes    map[pgid]*node     // node cache
-
-	// Sets the threshold for filling nodes when they split. By default,
-	// the bucket will fill to 50% but it can be useful to increase this
-	// amount if you know that your write workloads are mostly append-only.
-	//
-	// This is non-persisted across transactions so it must be set in every Tx.
-	FillPercent float64
-}
-
-// bucket represents the on-file representation of a bucket.
-// This is stored as the "value" of a bucket key. If the bucket is small enough,
-// then its root page can be stored inline in the "value", after the bucket
-// header. In the case of inline buckets, the "root" will be 0.
-type bucket struct {
-	root     pgid   // page id of the bucket's root-level page
-	sequence uint64 // monotonically incrementing, used by NextSequence()
-}
-
-// newBucket returns a new bucket associated with a transaction.
-func newBucket(tx *Tx) Bucket {
-	var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
-	if tx.writable {
-		b.buckets = make(map[string]*Bucket)
-		b.nodes = make(map[pgid]*node)
-	}
-	return b
-}
-
-// Tx returns the tx of the bucket.
-func (b *Bucket) Tx() *Tx {
-	return b.tx
-}
-
-// Root returns the root of the bucket.
-func (b *Bucket) Root() pgid {
-	return b.root
-}
-
-// Writable returns whether the bucket is writable.
-func (b *Bucket) Writable() bool {
-	return b.tx.writable
-}
-
-// Cursor creates a cursor associated with the bucket.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
-func (b *Bucket) Cursor() *Cursor {
-	// Update transaction statistics.
-	b.tx.stats.CursorCount++
-
-	// Allocate and return a cursor.
-	return &Cursor{
-		bucket: b,
-		stack:  make([]elemRef, 0),
-	}
-}
-
-// Bucket retrieves a nested bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) Bucket(name []byte) *Bucket {
-	if b.buckets != nil {
-		if child := b.buckets[string(name)]; child != nil {
-			return child
-		}
-	}
-
-	// Move cursor to key.
-	c := b.Cursor()
-	k, v, flags := c.seek(name)
-
-	// Return nil if the key doesn't exist or it is not a bucket.
-	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
-		return nil
-	}
-
-	// Otherwise create a bucket and cache it.
-	var child = b.openBucket(v)
-	if b.buckets != nil {
-		b.buckets[string(name)] = child
-	}
-
-	return child
-}
-
-// openBucket is a helper method that re-interprets a sub-bucket value
-// from a parent into a Bucket.
-func (b *Bucket) openBucket(value []byte) *Bucket {
-	var child = newBucket(b.tx)
-
-	// If unaligned load/stores are broken on this arch and value is
-	// unaligned simply clone to an aligned byte array.
-	unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
-
-	if unaligned {
-		value = cloneBytes(value)
-	}
-
-	// If this is a writable transaction then we need to copy the bucket entry.
-	// Read-only transactions can point directly at the mmap entry.
-	if b.tx.writable && !unaligned {
-		child.bucket = &bucket{}
-		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
-	} else {
-		child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
-	}
-
-	// Save a reference to the inline page if the bucket is inline.
-	if child.root == 0 {
-		child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
-	}
-
-	return &child
-}
-
-// CreateBucket creates a new bucket at the given key and returns the new bucket.
-// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
-	if b.tx.db == nil {
-		return nil, ErrTxClosed
-	} else if !b.tx.writable {
-		return nil, ErrTxNotWritable
-	} else if len(key) == 0 {
-		return nil, ErrBucketNameRequired
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if there is an existing key.
-	if bytes.Equal(key, k) {
-		if (flags & bucketLeafFlag) != 0 {
-			return nil, ErrBucketExists
-		} else {
-			return nil, ErrIncompatibleValue
-		}
-	}
-
-	// Create empty, inline bucket.
-	var bucket = Bucket{
-		bucket:      &bucket{},
-		rootNode:    &node{isLeaf: true},
-		FillPercent: DefaultFillPercent,
-	}
-	var value = bucket.write()
-
-	// Insert into node.
-	key = cloneBytes(key)
-	c.node().put(key, key, value, 0, bucketLeafFlag)
-
-	// Since subbuckets are not allowed on inline buckets, we need to
-	// dereference the inline page, if it exists. This will cause the bucket
-	// to be treated as a regular, non-inline bucket for the rest of the tx.
-	b.page = nil
-
-	return b.Bucket(key), nil
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
-	child, err := b.CreateBucket(key)
-	if err == ErrBucketExists {
-		return b.Bucket(key), nil
-	} else if err != nil {
-		return nil, err
-	}
-	return child, nil
-}
-
-// DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
-func (b *Bucket) DeleteBucket(key []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if bucket doesn't exist or is not a bucket.
-	if !bytes.Equal(key, k) {
-		return ErrBucketNotFound
-	} else if (flags & bucketLeafFlag) == 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Recursively delete all child buckets.
-	child := b.Bucket(key)
-	err := child.ForEach(func(k, v []byte) error {
-		if v == nil {
-			if err := child.DeleteBucket(k); err != nil {
-				return fmt.Errorf("delete bucket: %s", err)
-			}
-		}
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-
-	// Remove cached copy.
-	delete(b.buckets, string(key))
-
-	// Release all bucket pages to freelist.
-	child.nodes = nil
-	child.rootNode = nil
-	child.free()
-
-	// Delete the node if we have a matching key.
-	c.node().del(key)
-
-	return nil
-}
-
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist or if the key is a nested bucket.
-// The returned value is only valid for the life of the transaction.
-func (b *Bucket) Get(key []byte) []byte {
-	k, v, flags := b.Cursor().seek(key)
-
-	// Return nil if this is a bucket.
-	if (flags & bucketLeafFlag) != 0 {
-		return nil
-	}
-
-	// If our target node isn't the same key as what's passed in then return nil.
-	if !bytes.Equal(key, k) {
-		return nil
-	}
-	return v
-}
-
-// Put sets the value for a key in the bucket.
-// If the key exists then its previous value will be overwritten.
-// Supplied value must remain valid for the life of the transaction.
-// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
-func (b *Bucket) Put(key []byte, value []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	} else if len(key) == 0 {
-		return ErrKeyRequired
-	} else if len(key) > MaxKeySize {
-		return ErrKeyTooLarge
-	} else if int64(len(value)) > MaxValueSize {
-		return ErrValueTooLarge
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if there is an existing key with a bucket value.
-	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Insert into node.
-	key = cloneBytes(key)
-	c.node().put(key, key, value, 0, 0)
-
-	return nil
-}
-
-// Delete removes a key from the bucket.
-// If the key does not exist then nothing is done and a nil error is returned.
-// Returns an error if the bucket was created from a read-only transaction.
-func (b *Bucket) Delete(key []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	_, _, flags := c.seek(key)
-
-	// Return an error if there is an existing bucket value.
-	if (flags & bucketLeafFlag) != 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Delete the node if we have a matching key.
-	c.node().del(key)
-
-	return nil
-}
-
-// Sequence returns the current integer for the bucket without incrementing it.
-func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
-
-// SetSequence updates the sequence number for the bucket.
-func (b *Bucket) SetSequence(v uint64) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Materialize the root node if it hasn't been already so that the
-	// bucket will be saved during commit.
-	if b.rootNode == nil {
-		_ = b.node(b.root, nil)
-	}
-
-	// Set the sequence.
-	b.bucket.sequence = v
-	return nil
-}
-
-// NextSequence returns an autoincrementing integer for the bucket.
-func (b *Bucket) NextSequence() (uint64, error) {
-	if b.tx.db == nil {
-		return 0, ErrTxClosed
-	} else if !b.Writable() {
-		return 0, ErrTxNotWritable
-	}
-
-	// Materialize the root node if it hasn't been already so that the
-	// bucket will be saved during commit.
-	if b.rootNode == nil {
-		_ = b.node(b.root, nil)
-	}
-
-	// Increment and return the sequence.
-	b.bucket.sequence++
-	return b.bucket.sequence, nil
-}
-
-// ForEach executes a function for each key/value pair in a bucket.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller. The provided function must not modify
-// the bucket; this will result in undefined behavior.
-func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	}
-	c := b.Cursor()
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		if err := fn(k, v); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Stats returns stats on a bucket.
-func (b *Bucket) Stats() BucketStats {
-	var s, subStats BucketStats
-	pageSize := b.tx.db.pageSize
-	s.BucketN += 1
-	if b.root == 0 {
-		s.InlineBucketN += 1
-	}
-	b.forEachPage(func(p *page, depth int) {
-		if (p.flags & leafPageFlag) != 0 {
-			s.KeyN += int(p.count)
-
-			// used totals the used bytes for the page
-			used := pageHeaderSize
-
-			if p.count != 0 {
-				// If page has any elements, add all element headers.
-				used += leafPageElementSize * int(p.count-1)
-
-				// Add all element key, value sizes.
-				// The computation takes advantage of the fact that the position
-				// of the last element's key/value equals the total of the sizes
-				// of all previous elements' keys and values.
-				// It also includes the last element's header.
-				lastElement := p.leafPageElement(p.count - 1)
-				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
-			}
-
-			if b.root == 0 {
-				// For inlined bucket just update the inline stats
-				s.InlineBucketInuse += used
-			} else {
-				// For non-inlined bucket update all the leaf stats
-				s.LeafPageN++
-				s.LeafInuse += used
-				s.LeafOverflowN += int(p.overflow)
-
-				// Collect stats from sub-buckets.
-				// Do that by iterating over all element headers
-				// looking for the ones with the bucketLeafFlag.
-				for i := uint16(0); i < p.count; i++ {
-					e := p.leafPageElement(i)
-					if (e.flags & bucketLeafFlag) != 0 {
-						// For any bucket element, open the element value
-						// and recursively call Stats on the contained bucket.
-						subStats.Add(b.openBucket(e.value()).Stats())
-					}
-				}
-			}
-		} else if (p.flags & branchPageFlag) != 0 {
-			s.BranchPageN++
-			lastElement := p.branchPageElement(p.count - 1)
-
-			// used totals the used bytes for the page
-			// Add header and all element headers.
-			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
-
-			// Add size of all keys and values.
-			// Again, use the fact that the last element's position equals the
-			// total of the key and value sizes of all previous elements.
-			used += int(lastElement.pos + lastElement.ksize)
-			s.BranchInuse += used
-			s.BranchOverflowN += int(p.overflow)
-		}
-
-		// Keep track of maximum page depth.
-		if depth+1 > s.Depth {
-			s.Depth = (depth + 1)
-		}
-	})
-
-	// Alloc stats can be computed from page counts and pageSize.
-	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
-	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
-
-	// Add the max depth of sub-buckets to get total nested depth.
-	s.Depth += subStats.Depth
-	// Add the stats for all sub-buckets
-	s.Add(subStats)
-	return s
-}
-
-// forEachPage iterates over every page in a bucket, including inline pages.
-func (b *Bucket) forEachPage(fn func(*page, int)) {
-	// If we have an inline page then just use that.
-	if b.page != nil {
-		fn(b.page, 0)
-		return
-	}
-
-	// Otherwise traverse the page hierarchy.
-	b.tx.forEachPage(b.root, 0, fn)
-}
-
-// forEachPageNode iterates over every page (or node) in a bucket.
-// This also includes inline pages.
-func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
-	// If we have an inline page or root node then just use that.
-	if b.page != nil {
-		fn(b.page, nil, 0)
-		return
-	}
-	b._forEachPageNode(b.root, 0, fn)
-}
-
-func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
-	var p, n = b.pageNode(pgid)
-
-	// Execute function.
-	fn(p, n, depth)
-
-	// Recursively loop over children.
-	if p != nil {
-		if (p.flags & branchPageFlag) != 0 {
-			for i := 0; i < int(p.count); i++ {
-				elem := p.branchPageElement(uint16(i))
-				b._forEachPageNode(elem.pgid, depth+1, fn)
-			}
-		}
-	} else {
-		if !n.isLeaf {
-			for _, inode := range n.inodes {
-				b._forEachPageNode(inode.pgid, depth+1, fn)
-			}
-		}
-	}
-}
-
-// spill writes all the nodes for this bucket to dirty pages.
-func (b *Bucket) spill() error {
-	// Spill all child buckets first.
-	for name, child := range b.buckets {
-		// If the child bucket is small enough and it has no child buckets then
-		// write it inline into the parent bucket's page. Otherwise spill it
-		// like a normal bucket and make the parent value a pointer to the page.
-		var value []byte
-		if child.inlineable() {
-			child.free()
-			value = child.write()
-		} else {
-			if err := child.spill(); err != nil {
-				return err
-			}
-
-			// Update the child bucket header in this bucket.
-			value = make([]byte, unsafe.Sizeof(bucket{}))
-			var bucket = (*bucket)(unsafe.Pointer(&value[0]))
-			*bucket = *child.bucket
-		}
-
-		// Skip writing the bucket if there are no materialized nodes.
-		if child.rootNode == nil {
-			continue
-		}
-
-		// Update parent node.
-		var c = b.Cursor()
-		k, _, flags := c.seek([]byte(name))
-		if !bytes.Equal([]byte(name), k) {
-			panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
-		}
-		if flags&bucketLeafFlag == 0 {
-			panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
-		}
-		c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
-	}
-
-	// Ignore if there's not a materialized root node.
-	if b.rootNode == nil {
-		return nil
-	}
-
-	// Spill nodes.
-	if err := b.rootNode.spill(); err != nil {
-		return err
-	}
-	b.rootNode = b.rootNode.root()
-
-	// Update the root node for this bucket.
-	if b.rootNode.pgid >= b.tx.meta.pgid {
-		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
-	}
-	b.root = b.rootNode.pgid
-
-	return nil
-}
-
-// inlineable returns true if a bucket is small enough to be written inline
-// and if it contains no subbuckets. Otherwise returns false.
-func (b *Bucket) inlineable() bool {
-	var n = b.rootNode
-
-	// Bucket must only contain a single leaf node.
-	if n == nil || !n.isLeaf {
-		return false
-	}
-
-	// Bucket is not inlineable if it contains subbuckets or if it goes beyond
-	// our threshold for inline bucket size.
-	var size = pageHeaderSize
-	for _, inode := range n.inodes {
-		size += leafPageElementSize + len(inode.key) + len(inode.value)
-
-		if inode.flags&bucketLeafFlag != 0 {
-			return false
-		} else if size > b.maxInlineBucketSize() {
-			return false
-		}
-	}
-
-	return true
-}
-
-// Returns the maximum total size of a bucket to make it a candidate for inlining.
-func (b *Bucket) maxInlineBucketSize() int {
-	return b.tx.db.pageSize / 4
-}
-
-// write allocates and writes a bucket to a byte slice.
-func (b *Bucket) write() []byte {
-	// Allocate the appropriate size.
-	var n = b.rootNode
-	var value = make([]byte, bucketHeaderSize+n.size())
-
-	// Write a bucket header.
-	var bucket = (*bucket)(unsafe.Pointer(&value[0]))
-	*bucket = *b.bucket
-
-	// Convert byte slice to a fake page and write the root node.
-	var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
-	n.write(p)
-
-	return value
-}
-
-// rebalance attempts to balance all nodes.
-func (b *Bucket) rebalance() {
-	for _, n := range b.nodes {
-		n.rebalance()
-	}
-	for _, child := range b.buckets {
-		child.rebalance()
-	}
-}
-
-// node creates a node from a page and associates it with a given parent.
-func (b *Bucket) node(pgid pgid, parent *node) *node {
-	_assert(b.nodes != nil, "nodes map expected")
-
-	// Retrieve node if it's already been created.
-	if n := b.nodes[pgid]; n != nil {
-		return n
-	}
-
-	// Otherwise create a node and cache it.
-	n := &node{bucket: b, parent: parent}
-	if parent == nil {
-		b.rootNode = n
-	} else {
-		parent.children = append(parent.children, n)
-	}
-
-	// Use the inline page if this is an inline bucket.
-	var p = b.page
-	if p == nil {
-		p = b.tx.page(pgid)
-	}
-
-	// Read the page into the node and cache it.
-	n.read(p)
-	b.nodes[pgid] = n
-
-	// Update statistics.
-	b.tx.stats.NodeCount++
-
-	return n
-}
-
-// free recursively frees all pages in the bucket.
-func (b *Bucket) free() {
-	if b.root == 0 {
-		return
-	}
-
-	var tx = b.tx
-	b.forEachPageNode(func(p *page, n *node, _ int) {
-		if p != nil {
-			tx.db.freelist.free(tx.meta.txid, p)
-		} else {
-			n.free()
-		}
-	})
-	b.root = 0
-}
-
-// dereference removes all references to the old mmap.
-func (b *Bucket) dereference() {
-	if b.rootNode != nil {
-		b.rootNode.root().dereference()
-	}
-
-	for _, child := range b.buckets {
-		child.dereference()
-	}
-}
-
-// pageNode returns the in-memory node, if it exists.
-// Otherwise returns the underlying page.
-func (b *Bucket) pageNode(id pgid) (*page, *node) {
-	// Inline buckets have a fake page embedded in their value so treat them
-	// differently. We'll return the rootNode (if available) or the fake page.
-	if b.root == 0 {
-		if id != 0 {
-			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
-		}
-		if b.rootNode != nil {
-			return nil, b.rootNode
-		}
-		return b.page, nil
-	}
-
-	// Check the node cache for non-inline buckets.
-	if b.nodes != nil {
-		if n := b.nodes[id]; n != nil {
-			return nil, n
-		}
-	}
-
-	// Finally lookup the page from the transaction if no node is materialized.
-	return b.tx.page(id), nil
-}
-
-// BucketStats records statistics about resources used by a bucket.
-type BucketStats struct {
-	// Page count statistics.
-	BranchPageN     int // number of logical branch pages
-	BranchOverflowN int // number of physical branch overflow pages
-	LeafPageN       int // number of logical leaf pages
-	LeafOverflowN   int // number of physical leaf overflow pages
-
-	// Tree statistics.
-	KeyN  int // number of key/value pairs
-	Depth int // number of levels in B+tree
-
-	// Page size utilization.
-	BranchAlloc int // bytes allocated for physical branch pages
-	BranchInuse int // bytes actually used for branch data
-	LeafAlloc   int // bytes allocated for physical leaf pages
-	LeafInuse   int // bytes actually used for leaf data
-
-	// Bucket statistics
-	BucketN           int // total number of buckets including the top bucket
-	InlineBucketN     int // total number of inlined buckets
-	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
-}
-
-func (s *BucketStats) Add(other BucketStats) {
-	s.BranchPageN += other.BranchPageN
-	s.BranchOverflowN += other.BranchOverflowN
-	s.LeafPageN += other.LeafPageN
-	s.LeafOverflowN += other.LeafOverflowN
-	s.KeyN += other.KeyN
-	if s.Depth < other.Depth {
-		s.Depth = other.Depth
-	}
-	s.BranchAlloc += other.BranchAlloc
-	s.BranchInuse += other.BranchInuse
-	s.LeafAlloc += other.LeafAlloc
-	s.LeafInuse += other.LeafInuse
-
-	s.BucketN += other.BucketN
-	s.InlineBucketN += other.InlineBucketN
-	s.InlineBucketInuse += other.InlineBucketInuse
-}
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
-	var clone = make([]byte, len(v))
-	copy(clone, v)
-	return clone
-}
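
The Bucket API deleted above lives on unchanged in the bbolt fork this commit switches to. A minimal sketch of basic bucket CRUD against the fork's current import path, go.etcd.io/bbolt (the exact vendored path in this commit may differ); illustrative only, file path hypothetical.

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Put inside a read-write transaction.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Get inside a read-only transaction.
	err = db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		fmt.Printf("foo = %s\n", v) // the value is only valid inside the tx
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```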

+ 0 - 400
vendor/github.com/boltdb/bolt/cursor.go

@@ -1,400 +0,0 @@
-package bolt
-
-import (
-	"bytes"
-	"fmt"
-	"sort"
-)
-
-// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
-// Cursors see nested buckets with value == nil.
-// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
-//
-// Keys and values returned from the cursor are only valid for the life of the transaction.
-//
-// Changing data while traversing with a cursor may cause it to be invalidated
-// and return unexpected keys and/or values. You must reposition your cursor
-// after mutating data.
-type Cursor struct {
-	bucket *Bucket
-	stack  []elemRef
-}
-
-// Bucket returns the bucket that this cursor was created from.
-func (c *Cursor) Bucket() *Bucket {
-	return c.bucket
-}
-
-// First moves the cursor to the first item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) First() (key []byte, value []byte) {
-	_assert(c.bucket.tx.db != nil, "tx closed")
-	c.stack = c.stack[:0]
-	p, n := c.bucket.pageNode(c.bucket.root)
-	c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
-	c.first()
-
-	// If we land on an empty page then move to the next value.
-	// https://github.com/boltdb/bolt/issues/450
-	if c.stack[len(c.stack)-1].count() == 0 {
-		c.next()
-	}
-
-	k, v, flags := c.keyValue()
-	if (flags & uint32(bucketLeafFlag)) != 0 {
-		return k, nil
-	}
-	return k, v
-}
-
-// Last moves the cursor to the last item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Last() (key []byte, value []byte) {
-	_assert(c.bucket.tx.db != nil, "tx closed")
-	c.stack = c.stack[:0]
-	p, n := c.bucket.pageNode(c.bucket.root)
-	ref := elemRef{page: p, node: n}
-	ref.index = ref.count() - 1
-	c.stack = append(c.stack, ref)
-	c.last()
-	k, v, flags := c.keyValue()
-	if (flags & uint32(bucketLeafFlag)) != 0 {
-		return k, nil
-	}
-	return k, v
-}
-
-// Next moves the cursor to the next item in the bucket and returns its key and value.
-// If the cursor is at the end of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Next() (key []byte, value []byte) {
-	_assert(c.bucket.tx.db != nil, "tx closed")
-	k, v, flags := c.next()
-	if (flags & uint32(bucketLeafFlag)) != 0 {
-		return k, nil
-	}
-	return k, v
-}
-
-// Prev moves the cursor to the previous item in the bucket and returns its key and value.
-// If the cursor is at the beginning of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Prev() (key []byte, value []byte) {
-	_assert(c.bucket.tx.db != nil, "tx closed")
-
-	// Attempt to move back one element until we're successful.
-	// Move up the stack as we hit the beginning of each page in our stack.
-	for i := len(c.stack) - 1; i >= 0; i-- {
-		elem := &c.stack[i]
-		if elem.index > 0 {
-			elem.index--
-			break
-		}
-		c.stack = c.stack[:i]
-	}
-
-	// If we've hit the end then return nil.
-	if len(c.stack) == 0 {
-		return nil, nil
-	}
-
-	// Move down the stack to find the last element of the last leaf under this branch.
-	c.last()
-	k, v, flags := c.keyValue()
-	if (flags & uint32(bucketLeafFlag)) != 0 {
-		return k, nil
-	}
-	return k, v
-}
-
-// Seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used. If no keys
-// follow, a nil key is returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
-	k, v, flags := c.seek(seek)
-
-	// If we ended up after the last element of a page then move to the next one.
-	if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
-		k, v, flags = c.next()
-	}
-
-	if k == nil {
-		return nil, nil
-	} else if (flags & uint32(bucketLeafFlag)) != 0 {
-		return k, nil
-	}
-	return k, v
-}
-
-// Delete removes the current key/value under the cursor from the bucket.
-// Delete fails if current key/value is a bucket or if the transaction is not writable.
-func (c *Cursor) Delete() error {
-	if c.bucket.tx.db == nil {
-		return ErrTxClosed
-	} else if !c.bucket.Writable() {
-		return ErrTxNotWritable
-	}
-
-	key, _, flags := c.keyValue()
-	// Return an error if current value is a bucket.
-	if (flags & bucketLeafFlag) != 0 {
-		return ErrIncompatibleValue
-	}
-	c.node().del(key)
-
-	return nil
-}
-
-// seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used.
-func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
-	_assert(c.bucket.tx.db != nil, "tx closed")
-
-	// Start from root page/node and traverse to correct page.
-	c.stack = c.stack[:0]
-	c.search(seek, c.bucket.root)
-	ref := &c.stack[len(c.stack)-1]
-
-	// If the cursor is pointing to the end of page/node then return nil.
-	if ref.index >= ref.count() {
-		return nil, nil, 0
-	}
-
-	// If this is a bucket then return a nil value.
-	return c.keyValue()
-}
-
-// first moves the cursor to the first leaf element under the last page in the stack.
-func (c *Cursor) first() {
-	for {
-		// Exit when we hit a leaf page.
-		var ref = &c.stack[len(c.stack)-1]
-		if ref.isLeaf() {
-			break
-		}
-
-		// Keep adding pages pointing to the first element to the stack.
-		var pgid pgid
-		if ref.node != nil {
-			pgid = ref.node.inodes[ref.index].pgid
-		} else {
-			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
-		}
-		p, n := c.bucket.pageNode(pgid)
-		c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
-	}
-}
-
-// last moves the cursor to the last leaf element under the last page in the stack.
-func (c *Cursor) last() {
-	for {
-		// Exit when we hit a leaf page.
-		ref := &c.stack[len(c.stack)-1]
-		if ref.isLeaf() {
-			break
-		}
-
-		// Keep adding pages pointing to the last element in the stack.
-		var pgid pgid
-		if ref.node != nil {
-			pgid = ref.node.inodes[ref.index].pgid
-		} else {
-			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
-		}
-		p, n := c.bucket.pageNode(pgid)
-
-		var nextRef = elemRef{page: p, node: n}
-		nextRef.index = nextRef.count() - 1
-		c.stack = append(c.stack, nextRef)
-	}
-}
-
-// next moves to the next leaf element and returns the key and value.
-// If the cursor is at the last leaf element then it stays there and returns nil.
-func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
-	for {
-		// Attempt to move over one element until we're successful.
-		// Move up the stack as we hit the end of each page in our stack.
-		var i int
-		for i = len(c.stack) - 1; i >= 0; i-- {
-			elem := &c.stack[i]
-			if elem.index < elem.count()-1 {
-				elem.index++
-				break
-			}
-		}
-
-		// If we've hit the root page then stop and return. This will leave the
-		// cursor on the last element of the last page.
-		if i == -1 {
-			return nil, nil, 0
-		}
-
-		// Otherwise start from where we left off in the stack and find the
-		// first element of the first leaf page.
-		c.stack = c.stack[:i+1]
-		c.first()
-
-		// If this is an empty page then restart and move back up the stack.
-		// https://github.com/boltdb/bolt/issues/450
-		if c.stack[len(c.stack)-1].count() == 0 {
-			continue
-		}
-
-		return c.keyValue()
-	}
-}
-
-// search recursively performs a binary search against a given page/node until it finds a given key.
-func (c *Cursor) search(key []byte, pgid pgid) {
-	p, n := c.bucket.pageNode(pgid)
-	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
-		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
-	}
-	e := elemRef{page: p, node: n}
-	c.stack = append(c.stack, e)
-
-	// If we're on a leaf page/node then find the specific node.
-	if e.isLeaf() {
-		c.nsearch(key)
-		return
-	}
-
-	if n != nil {
-		c.searchNode(key, n)
-		return
-	}
-	c.searchPage(key, p)
-}
-
-func (c *Cursor) searchNode(key []byte, n *node) {
-	var exact bool
-	index := sort.Search(len(n.inodes), func(i int) bool {
-		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
-		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
-		ret := bytes.Compare(n.inodes[i].key, key)
-		if ret == 0 {
-			exact = true
-		}
-		return ret != -1
-	})
-	if !exact && index > 0 {
-		index--
-	}
-	c.stack[len(c.stack)-1].index = index
-
-	// Recursively search to the next page.
-	c.search(key, n.inodes[index].pgid)
-}
-
-func (c *Cursor) searchPage(key []byte, p *page) {
-	// Binary search for the correct range.
-	inodes := p.branchPageElements()
-
-	var exact bool
-	index := sort.Search(int(p.count), func(i int) bool {
-		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
-		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
-		ret := bytes.Compare(inodes[i].key(), key)
-		if ret == 0 {
-			exact = true
-		}
-		return ret != -1
-	})
-	if !exact && index > 0 {
-		index--
-	}
-	c.stack[len(c.stack)-1].index = index
-
-	// Recursively search to the next page.
-	c.search(key, inodes[index].pgid)
-}
-
-// nsearch searches the leaf node on the top of the stack for a key.
-func (c *Cursor) nsearch(key []byte) {
-	e := &c.stack[len(c.stack)-1]
-	p, n := e.page, e.node
-
-	// If we have a node then search its inodes.
-	if n != nil {
-		index := sort.Search(len(n.inodes), func(i int) bool {
-			return bytes.Compare(n.inodes[i].key, key) != -1
-		})
-		e.index = index
-		return
-	}
-
-	// If we have a page then search its leaf elements.
-	inodes := p.leafPageElements()
-	index := sort.Search(int(p.count), func(i int) bool {
-		return bytes.Compare(inodes[i].key(), key) != -1
-	})
-	e.index = index
-}
-
-// keyValue returns the key and value of the current leaf element.
-func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
-	ref := &c.stack[len(c.stack)-1]
-	if ref.count() == 0 || ref.index >= ref.count() {
-		return nil, nil, 0
-	}
-
-	// Retrieve value from node.
-	if ref.node != nil {
-		inode := &ref.node.inodes[ref.index]
-		return inode.key, inode.value, inode.flags
-	}
-
-	// Or retrieve value from page.
-	elem := ref.page.leafPageElement(uint16(ref.index))
-	return elem.key(), elem.value(), elem.flags
-}
-
-// node returns the node that the cursor is currently positioned on.
-func (c *Cursor) node() *node {
-	_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
-
-	// If the top of the stack is a leaf node then just return it.
-	if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
-		return ref.node
-	}
-
-	// Start from root and traverse down the hierarchy.
-	var n = c.stack[0].node
-	if n == nil {
-		n = c.bucket.node(c.stack[0].page.id, nil)
-	}
-	for _, ref := range c.stack[:len(c.stack)-1] {
-		_assert(!n.isLeaf, "expected branch node")
-		n = n.childAt(int(ref.index))
-	}
-	_assert(n.isLeaf, "expected leaf node")
-	return n
-}
-
-// elemRef represents a reference to an element on a given page/node.
-type elemRef struct {
-	page  *page
-	node  *node
-	index int
-}
-
-// isLeaf returns whether the ref is pointing at a leaf page/node.
-func (r *elemRef) isLeaf() bool {
-	if r.node != nil {
-		return r.node.isLeaf
-	}
-	return (r.page.flags & leafPageFlag) != 0
-}
-
-// count returns the number of inodes or page elements.
-func (r *elemRef) count() int {
-	if r.node != nil {
-		return len(r.node.inodes)
-	}
-	return int(r.page.count)
-}
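
A minimal sketch of forward iteration and a prefix scan with the Cursor API deleted above (identical in bbolt); illustrative only, and it assumes a "widgets" bucket such as the one created in the previous sketch.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return nil // bucket not created yet
		}
		c := b.Cursor()

		// Full scan in key order; nested buckets appear with v == nil.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}

		// Prefix scan: Seek to the first key >= prefix, stop once it stops matching.
		prefix := []byte("fo")
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```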

+ 0 - 1036
vendor/github.com/boltdb/bolt/db.go

@@ -1,1036 +0,0 @@
-package bolt
-
-import (
-	"errors"
-	"fmt"
-	"hash/fnv"
-	"log"
-	"os"
-	"runtime"
-	"runtime/debug"
-	"strings"
-	"sync"
-	"time"
-	"unsafe"
-)
-
-// The largest step that can be taken when remapping the mmap.
-const maxMmapStep = 1 << 30 // 1GB
-
-// The data file format version.
-const version = 2
-
-// Represents a marker value to indicate that a file is a Bolt DB.
-const magic uint32 = 0xED0CDAED
-
-// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
-// syncing changes to a file.  This is required as some operating systems,
-// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
-// must be synchronized using the msync(2) syscall.
-const IgnoreNoSync = runtime.GOOS == "openbsd"
-
-// Default values if not set in a DB instance.
-const (
-	DefaultMaxBatchSize  int = 1000
-	DefaultMaxBatchDelay     = 10 * time.Millisecond
-	DefaultAllocSize         = 16 * 1024 * 1024
-)
-
-// default page size for db is set to the OS page size.
-var defaultPageSize = os.Getpagesize()
-
-// DB represents a collection of buckets persisted to a file on disk.
-// All data access is performed through transactions which can be obtained through the DB.
-// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
-type DB struct {
-	// When enabled, the database will perform a Check() after every commit.
-	// A panic is issued if the database is in an inconsistent state. This
-	// flag has a large performance impact so it should only be used for
-	// debugging purposes.
-	StrictMode bool
-
-	// Setting the NoSync flag will cause the database to skip fsync()
-	// calls after each commit. This can be useful when bulk loading data
-	// into a database and you can restart the bulk load in the event of
-	// a system failure or database corruption. Do not set this flag for
-	// normal use.
-	//
-	// If the package global IgnoreNoSync constant is true, this value is
-	// ignored.  See the comment on that constant for more details.
-	//
-	// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
-	NoSync bool
-
-	// When true, skips the truncate call when growing the database.
-	// Setting this to true is only safe on non-ext3/ext4 systems.
-	// Skipping truncation avoids preallocation of hard drive space and
-	// bypasses a truncate() and fsync() syscall on remapping.
-	//
-	// https://github.com/boltdb/bolt/issues/284
-	NoGrowSync bool
-
-	// If you want to read the entire database fast, you can set MmapFlags to
-	// syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
-	MmapFlags int
-
-	// MaxBatchSize is the maximum size of a batch. Default value is
-	// copied from DefaultMaxBatchSize in Open.
-	//
-	// If <=0, disables batching.
-	//
-	// Do not change concurrently with calls to Batch.
-	MaxBatchSize int
-
-	// MaxBatchDelay is the maximum delay before a batch starts.
-	// Default value is copied from DefaultMaxBatchDelay in Open.
-	//
-	// If <=0, effectively disables batching.
-	//
-	// Do not change concurrently with calls to Batch.
-	MaxBatchDelay time.Duration
-
-	// AllocSize is the amount of space allocated when the database
-	// needs to create new pages. This is done to amortize the cost
-	// of truncate() and fsync() when growing the data file.
-	AllocSize int
-
-	path     string
-	file     *os.File
-	lockfile *os.File // windows only
-	dataref  []byte   // mmap'ed readonly, write throws SEGV
-	data     *[maxMapSize]byte
-	datasz   int
-	filesz   int // current on disk file size
-	meta0    *meta
-	meta1    *meta
-	pageSize int
-	opened   bool
-	rwtx     *Tx
-	txs      []*Tx
-	freelist *freelist
-	stats    Stats
-
-	pagePool sync.Pool
-
-	batchMu sync.Mutex
-	batch   *batch
-
-	rwlock   sync.Mutex   // Allows only one writer at a time.
-	metalock sync.Mutex   // Protects meta page access.
-	mmaplock sync.RWMutex // Protects mmap access during remapping.
-	statlock sync.RWMutex // Protects stats access.
-
-	ops struct {
-		writeAt func(b []byte, off int64) (n int, err error)
-	}
-
-	// Read only mode.
-	// When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
-	readOnly bool
-}
-
-// Path returns the path to the currently open database file.
-func (db *DB) Path() string {
-	return db.path
-}
-
-// GoString returns the Go string representation of the database.
-func (db *DB) GoString() string {
-	return fmt.Sprintf("bolt.DB{path:%q}", db.path)
-}
-
-// String returns the string representation of the database.
-func (db *DB) String() string {
-	return fmt.Sprintf("DB<%q>", db.path)
-}
-
-// Open creates and opens a database at the given path.
-// If the file does not exist then it will be created automatically.
-// Passing in nil options will cause Bolt to open the database with the default options.
-func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
-	var db = &DB{opened: true}
-
-	// Set default options if no options are provided.
-	if options == nil {
-		options = DefaultOptions
-	}
-	db.NoGrowSync = options.NoGrowSync
-	db.MmapFlags = options.MmapFlags
-
-	// Set default values for later DB operations.
-	db.MaxBatchSize = DefaultMaxBatchSize
-	db.MaxBatchDelay = DefaultMaxBatchDelay
-	db.AllocSize = DefaultAllocSize
-
-	flag := os.O_RDWR
-	if options.ReadOnly {
-		flag = os.O_RDONLY
-		db.readOnly = true
-	}
-
-	// Open data file and separate sync handler for metadata writes.
-	db.path = path
-	var err error
-	if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
-		_ = db.close()
-		return nil, err
-	}
-
-	// Lock file so that other processes using Bolt in read-write mode cannot
-	// use the database at the same time. This would cause corruption since
-	// the two processes would write meta pages and free pages separately.
-	// The database file is locked exclusively (only one process can grab the lock)
-	// if !options.ReadOnly.
-	// The database file is locked using the shared lock (more than one process may
-	// hold a lock at the same time) otherwise (options.ReadOnly is set).
-	if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
-		_ = db.close()
-		return nil, err
-	}
-
-	// Default values for test hooks
-	db.ops.writeAt = db.file.WriteAt
-
-	// Initialize the database if it doesn't exist.
-	if info, err := db.file.Stat(); err != nil {
-		return nil, err
-	} else if info.Size() == 0 {
-		// Initialize new files with meta pages.
-		if err := db.init(); err != nil {
-			return nil, err
-		}
-	} else {
-		// Read the first meta page to determine the page size.
-		var buf [0x1000]byte
-		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
-			m := db.pageInBuffer(buf[:], 0).meta()
-			if err := m.validate(); err != nil {
-				// If we can't read the page size, we can assume it's the same
-				// as the OS -- since that's how the page size was chosen in the
-				// first place.
-				//
-				// If the first page is invalid and this OS uses a different
-				// page size than what the database was created with then we
-				// are out of luck and cannot access the database.
-				db.pageSize = os.Getpagesize()
-			} else {
-				db.pageSize = int(m.pageSize)
-			}
-		}
-	}
-
-	// Initialize page pool.
-	db.pagePool = sync.Pool{
-		New: func() interface{} {
-			return make([]byte, db.pageSize)
-		},
-	}
-
-	// Memory map the data file.
-	if err := db.mmap(options.InitialMmapSize); err != nil {
-		_ = db.close()
-		return nil, err
-	}
-
-	// Read in the freelist.
-	db.freelist = newFreelist()
-	db.freelist.read(db.page(db.meta().freelist))
-
-	// Mark the database as opened and return.
-	return db, nil
-}
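
A minimal sketch of this Open entry point as it is typically called (identical in bbolt): a Timeout bounds the flock retry loop so Open fails instead of blocking forever when another process holds the exclusive lock. Illustrative only; path is hypothetical.

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Fail after one second instead of blocking indefinitely on the file lock.
	db, err := bolt.Open("/tmp/example.db", 0600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```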
-
-// mmap opens the underlying memory-mapped file and initializes the meta references.
-// minsz is the minimum size that the new mmap can be.
-func (db *DB) mmap(minsz int) error {
-	db.mmaplock.Lock()
-	defer db.mmaplock.Unlock()
-
-	info, err := db.file.Stat()
-	if err != nil {
-		return fmt.Errorf("mmap stat error: %s", err)
-	} else if int(info.Size()) < db.pageSize*2 {
-		return fmt.Errorf("file size too small")
-	}
-
-	// Ensure the size is at least the minimum size.
-	var size = int(info.Size())
-	if size < minsz {
-		size = minsz
-	}
-	size, err = db.mmapSize(size)
-	if err != nil {
-		return err
-	}
-
-	// Dereference all mmap references before unmapping.
-	if db.rwtx != nil {
-		db.rwtx.root.dereference()
-	}
-
-	// Unmap existing data before continuing.
-	if err := db.munmap(); err != nil {
-		return err
-	}
-
-	// Memory-map the data file as a byte slice.
-	if err := mmap(db, size); err != nil {
-		return err
-	}
-
-	// Save references to the meta pages.
-	db.meta0 = db.page(0).meta()
-	db.meta1 = db.page(1).meta()
-
-	// Validate the meta pages. We only return an error if both meta pages fail
-	// validation, since meta0 failing validation means that it wasn't saved
-	// properly -- but we can recover using meta1. And vice-versa.
-	err0 := db.meta0.validate()
-	err1 := db.meta1.validate()
-	if err0 != nil && err1 != nil {
-		return err0
-	}
-
-	return nil
-}
-
-// munmap unmaps the data file from memory.
-func (db *DB) munmap() error {
-	if err := munmap(db); err != nil {
-		return fmt.Errorf("unmap error: %s", err)
-	}
-	return nil
-}
-
-// mmapSize determines the appropriate size for the mmap given the current size
-// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
-// Returns an error if the new mmap size is greater than the max allowed.
-func (db *DB) mmapSize(size int) (int, error) {
-	// Double the size from 32KB until 1GB.
-	for i := uint(15); i <= 30; i++ {
-		if size <= 1<<i {
-			return 1 << i, nil
-		}
-	}
-
-	// Verify the requested size is not above the maximum allowed.
-	if size > maxMapSize {
-		return 0, fmt.Errorf("mmap too large")
-	}
-
-	// If larger than 1GB then grow by 1GB at a time.
-	sz := int64(size)
-	if remainder := sz % int64(maxMmapStep); remainder > 0 {
-		sz += int64(maxMmapStep) - remainder
-	}
-
-	// Ensure that the mmap size is a multiple of the page size.
-	// This should always be true since we're incrementing in 1GB steps.
-	pageSize := int64(db.pageSize)
-	if (sz % pageSize) != 0 {
-		sz = ((sz / pageSize) + 1) * pageSize
-	}
-
-	// If we've exceeded the max size then only grow up to the max size.
-	if sz > maxMapSize {
-		sz = maxMapSize
-	}
-
-	return int(sz), nil
-}
-
-// init creates a new database file and initializes its meta pages.
-func (db *DB) init() error {
-	// Set the page size to the OS page size.
-	db.pageSize = os.Getpagesize()
-
-	// Create two meta pages on a buffer.
-	buf := make([]byte, db.pageSize*4)
-	for i := 0; i < 2; i++ {
-		p := db.pageInBuffer(buf[:], pgid(i))
-		p.id = pgid(i)
-		p.flags = metaPageFlag
-
-		// Initialize the meta page.
-		m := p.meta()
-		m.magic = magic
-		m.version = version
-		m.pageSize = uint32(db.pageSize)
-		m.freelist = 2
-		m.root = bucket{root: 3}
-		m.pgid = 4
-		m.txid = txid(i)
-		m.checksum = m.sum64()
-	}
-
-	// Write an empty freelist at page 3.
-	p := db.pageInBuffer(buf[:], pgid(2))
-	p.id = pgid(2)
-	p.flags = freelistPageFlag
-	p.count = 0
-
-	// Write an empty leaf page at page 4.
-	p = db.pageInBuffer(buf[:], pgid(3))
-	p.id = pgid(3)
-	p.flags = leafPageFlag
-	p.count = 0
-
-	// Write the buffer to our data file.
-	if _, err := db.ops.writeAt(buf, 0); err != nil {
-		return err
-	}
-	if err := fdatasync(db); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Close releases all database resources.
-// All transactions must be closed before closing the database.
-func (db *DB) Close() error {
-	db.rwlock.Lock()
-	defer db.rwlock.Unlock()
-
-	db.metalock.Lock()
-	defer db.metalock.Unlock()
-
-	db.mmaplock.RLock()
-	defer db.mmaplock.RUnlock()
-
-	return db.close()
-}
-
-func (db *DB) close() error {
-	if !db.opened {
-		return nil
-	}
-
-	db.opened = false
-
-	db.freelist = nil
-
-	// Clear ops.
-	db.ops.writeAt = nil
-
-	// Close the mmap.
-	if err := db.munmap(); err != nil {
-		return err
-	}
-
-	// Close file handles.
-	if db.file != nil {
-		// No need to unlock read-only file.
-		if !db.readOnly {
-			// Unlock the file.
-			if err := funlock(db); err != nil {
-				log.Printf("bolt.Close(): funlock error: %s", err)
-			}
-		}
-
-		// Close the file descriptor.
-		if err := db.file.Close(); err != nil {
-			return fmt.Errorf("db file close: %s", err)
-		}
-		db.file = nil
-	}
-
-	db.path = ""
-	return nil
-}
-
-// Begin starts a new transaction.
-// Multiple read-only transactions can be used concurrently but only one
-// write transaction can be used at a time. Starting multiple write transactions
-// will cause the calls to block and be serialized until the current write
-// transaction finishes.
-//
-// Transactions should not be dependent on one another. Opening a read
-// transaction and a write transaction in the same goroutine can cause the
-// writer to deadlock because the database periodically needs to re-mmap itself
-// as it grows and it cannot do that while a read transaction is open.
-//
-// If a long-running read transaction (for example, a snapshot transaction) is
-// needed, you might want to set Options.InitialMmapSize to a large enough value
-// to avoid potential blocking of the write transaction.
-//
-// IMPORTANT: You must close read-only transactions after you are finished or
-// else the database will not reclaim old pages.
-func (db *DB) Begin(writable bool) (*Tx, error) {
-	if writable {
-		return db.beginRWTx()
-	}
-	return db.beginTx()
-}
-
-func (db *DB) beginTx() (*Tx, error) {
-	// Lock the meta pages while we initialize the transaction. We obtain
-	// the meta lock before the mmap lock because that's the order that the
-	// write transaction will obtain them.
-	db.metalock.Lock()
-
-	// Obtain a read-only lock on the mmap. When the mmap is remapped it will
-	// obtain a write lock so all transactions must finish before it can be
-	// remapped.
-	db.mmaplock.RLock()
-
-	// Exit if the database is not open yet.
-	if !db.opened {
-		db.mmaplock.RUnlock()
-		db.metalock.Unlock()
-		return nil, ErrDatabaseNotOpen
-	}
-
-	// Create a transaction associated with the database.
-	t := &Tx{}
-	t.init(db)
-
-	// Keep track of transaction until it closes.
-	db.txs = append(db.txs, t)
-	n := len(db.txs)
-
-	// Unlock the meta pages.
-	db.metalock.Unlock()
-
-	// Update the transaction stats.
-	db.statlock.Lock()
-	db.stats.TxN++
-	db.stats.OpenTxN = n
-	db.statlock.Unlock()
-
-	return t, nil
-}
-
-func (db *DB) beginRWTx() (*Tx, error) {
-	// If the database was opened with Options.ReadOnly, return an error.
-	if db.readOnly {
-		return nil, ErrDatabaseReadOnly
-	}
-
-	// Obtain writer lock. This is released by the transaction when it closes.
-	// This enforces only one writer transaction at a time.
-	db.rwlock.Lock()
-
-	// Once we have the writer lock then we can lock the meta pages so that
-	// we can set up the transaction.
-	db.metalock.Lock()
-	defer db.metalock.Unlock()
-
-	// Exit if the database is not open yet.
-	if !db.opened {
-		db.rwlock.Unlock()
-		return nil, ErrDatabaseNotOpen
-	}
-
-	// Create a transaction associated with the database.
-	t := &Tx{writable: true}
-	t.init(db)
-	db.rwtx = t
-
-	// Free any pages associated with closed read-only transactions.
-	var minid txid = 0xFFFFFFFFFFFFFFFF
-	for _, t := range db.txs {
-		if t.meta.txid < minid {
-			minid = t.meta.txid
-		}
-	}
-	if minid > 0 {
-		db.freelist.release(minid - 1)
-	}
-
-	return t, nil
-}
-
-// removeTx removes a transaction from the database.
-func (db *DB) removeTx(tx *Tx) {
-	// Release the read lock on the mmap.
-	db.mmaplock.RUnlock()
-
-	// Use the meta lock to restrict access to the DB object.
-	db.metalock.Lock()
-
-	// Remove the transaction.
-	for i, t := range db.txs {
-		if t == tx {
-			db.txs = append(db.txs[:i], db.txs[i+1:]...)
-			break
-		}
-	}
-	n := len(db.txs)
-
-	// Unlock the meta pages.
-	db.metalock.Unlock()
-
-	// Merge statistics.
-	db.statlock.Lock()
-	db.stats.OpenTxN = n
-	db.stats.TxStats.add(&tx.stats)
-	db.statlock.Unlock()
-}
-
-// Update executes a function within the context of a read-write managed transaction.
-// If no error is returned from the function then the transaction is committed.
-// If an error is returned then the entire transaction is rolled back.
-// Any error that is returned from the function or returned from the commit is
-// returned from the Update() method.
-//
-// Attempting to manually commit or rollback within the function will cause a panic.
-func (db *DB) Update(fn func(*Tx) error) error {
-	t, err := db.Begin(true)
-	if err != nil {
-		return err
-	}
-
-	// Make sure the transaction rolls back in the event of a panic.
-	defer func() {
-		if t.db != nil {
-			t.rollback()
-		}
-	}()
-
-	// Mark as a managed tx so that the inner function cannot manually commit.
-	t.managed = true
-
-	// If an error is returned from the function then rollback and return error.
-	err = fn(t)
-	t.managed = false
-	if err != nil {
-		_ = t.Rollback()
-		return err
-	}
-
-	return t.Commit()
-}
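
A minimal sketch of the managed-transaction contract documented above: returning an error from the Update closure rolls the whole transaction back, so the Put below is never persisted. Illustrative only (API identical in bbolt); names and path are hypothetical.

```go
package main

import (
	"errors"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		if err := b.Put([]byte("k"), []byte("v")); err != nil {
			return err
		}
		return errors.New("abort") // forces rollback; the Put above is discarded
	})
	fmt.Println(err) // "abort"; the database is unchanged
}
```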
-
-// View executes a function within the context of a managed read-only transaction.
-// Any error that is returned from the function is returned from the View() method.
-//
-// Attempting to manually rollback within the function will cause a panic.
-func (db *DB) View(fn func(*Tx) error) error {
-	t, err := db.Begin(false)
-	if err != nil {
-		return err
-	}
-
-	// Make sure the transaction rolls back in the event of a panic.
-	defer func() {
-		if t.db != nil {
-			t.rollback()
-		}
-	}()
-
-	// Mark as a managed tx so that the inner function cannot manually rollback.
-	t.managed = true
-
-	// If an error is returned from the function then pass it through.
-	err = fn(t)
-	t.managed = false
-	if err != nil {
-		_ = t.Rollback()
-		return err
-	}
-
-	if err := t.Rollback(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Batch calls fn as part of a batch. It behaves similarly to Update,
-// except:
-//
-// 1. concurrent Batch calls can be combined into a single Bolt
-// transaction.
-//
-// 2. the function passed to Batch may be called multiple times,
-// regardless of whether it returns an error or not.
-//
-// This means that Batch function side effects must be idempotent and
-// take permanent effect only after a successful return is seen in
-// the caller.
-//
-// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
-// and DB.MaxBatchDelay, respectively.
-//
-// Batch is only useful when there are multiple goroutines calling it.
-func (db *DB) Batch(fn func(*Tx) error) error {
-	errCh := make(chan error, 1)
-
-	db.batchMu.Lock()
-	if db.batch == nil || len(db.batch.calls) >= db.MaxBatchSize {
-		// There is no existing batch, or the existing batch is full; start a new one.
-		db.batch = &batch{
-			db: db,
-		}
-		db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
-	}
-	db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
-	if len(db.batch.calls) >= db.MaxBatchSize {
-		// wake up batch, it's ready to run
-		go db.batch.trigger()
-	}
-	db.batchMu.Unlock()
-
-	err := <-errCh
-	if err == trySolo {
-		err = db.Update(fn)
-	}
-	return err
-}
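
A minimal sketch of the Batch usage pattern described above: many goroutines committing small writes concurrently, which Batch coalesces into shared transactions to amortize fsync cost. Because fn may run more than once, it must be idempotent. Illustrative only (API identical in bbolt); names and path are hypothetical.

```go
package main

import (
	"fmt"
	"log"
	"sync"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		return err
	}); err != nil {
		log.Fatal(err)
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Writes from concurrent callers coalesce into shared transactions.
			err := db.Batch(func(tx *bolt.Tx) error {
				k := []byte(fmt.Sprintf("key-%d", i))
				return tx.Bucket([]byte("widgets")).Put(k, []byte("v")) // idempotent
			})
			if err != nil {
				log.Println(err)
			}
		}()
	}
	wg.Wait()}
```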
-
-type call struct {
-	fn  func(*Tx) error
-	err chan<- error
-}
-
-type batch struct {
-	db    *DB
-	timer *time.Timer
-	start sync.Once
-	calls []call
-}
-
-// trigger runs the batch if it hasn't already been run.
-func (b *batch) trigger() {
-	b.start.Do(b.run)
-}
-
-// run performs the transactions in the batch and communicates results
-// back to DB.Batch.
-func (b *batch) run() {
-	b.db.batchMu.Lock()
-	b.timer.Stop()
-	// Make sure no new work is added to this batch, but don't break
-	// other batches.
-	if b.db.batch == b {
-		b.db.batch = nil
-	}
-	b.db.batchMu.Unlock()
-
-retry:
-	for len(b.calls) > 0 {
-		var failIdx = -1
-		err := b.db.Update(func(tx *Tx) error {
-			for i, c := range b.calls {
-				if err := safelyCall(c.fn, tx); err != nil {
-					failIdx = i
-					return err
-				}
-			}
-			return nil
-		})
-
-		if failIdx >= 0 {
-			// Take the failing transaction out of the batch. It's safe
-			// to shorten b.calls here because db.batch no longer points
-			// to us, so no new calls can be appended concurrently.
-			c := b.calls[failIdx]
-			b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
-			// tell the submitter to re-run it solo; continue with the rest of the batch
-			c.err <- trySolo
-			continue retry
-		}
-
-		// pass success, or bolt internal errors, to all callers
-		for _, c := range b.calls {
-			if c.err != nil {
-				c.err <- err
-			}
-		}
-		break retry
-	}
-}
-
-// trySolo is a special sentinel error value used for signaling that a
-// transaction function should be re-run. It should never be seen by
-// callers.
-var trySolo = errors.New("batch function returned an error and should be re-run solo")
-
-type panicked struct {
-	reason interface{}
-}
-
-func (p panicked) Error() string {
-	if err, ok := p.reason.(error); ok {
-		return err.Error()
-	}
-	return fmt.Sprintf("panic: %v", p.reason)
-}
-
-func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
-	defer func() {
-		if p := recover(); p != nil {
-			err = panicked{p}
-		}
-	}()
-	return fn(tx)
-}
-
-// Sync executes fdatasync() against the database file handle.
-//
-// This is not necessary under normal operation. However, if you use NoSync
-// then it allows you to force the database file to sync against the disk.
-func (db *DB) Sync() error { return fdatasync(db) }
-
-// Stats retrieves ongoing performance stats for the database.
-// This is only updated when a transaction closes.
-func (db *DB) Stats() Stats {
-	db.statlock.RLock()
-	defer db.statlock.RUnlock()
-	return db.stats
-}
-
-// Info is for internal access to the raw data bytes from the C cursor.
-// Use carefully, or not at all.
-func (db *DB) Info() *Info {
-	return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
-}
-
-// page retrieves a page reference from the mmap based on the current page size.
-func (db *DB) page(id pgid) *page {
-	pos := id * pgid(db.pageSize)
-	return (*page)(unsafe.Pointer(&db.data[pos]))
-}
-
-// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
-func (db *DB) pageInBuffer(b []byte, id pgid) *page {
-	return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
-}
-
-// meta retrieves the current meta page reference.
-func (db *DB) meta() *meta {
-	// We have to return the meta with the highest txid which doesn't fail
-	// validation. Otherwise, we can cause errors when in fact the database is
-	// in a consistent state. metaA is the one with the higher txid.
-	metaA := db.meta0
-	metaB := db.meta1
-	if db.meta1.txid > db.meta0.txid {
-		metaA = db.meta1
-		metaB = db.meta0
-	}
-
-	// Use the higher meta page if valid. Otherwise fall back to the previous one, if valid.
-	if err := metaA.validate(); err == nil {
-		return metaA
-	} else if err := metaB.validate(); err == nil {
-		return metaB
-	}
-
-	// This should never be reached, because both meta1 and meta0 were validated
-	// on mmap() and we do fsync() on every write.
-	panic("bolt.DB.meta(): invalid meta pages")
-}
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (db *DB) allocate(count int) (*page, error) {
-	// Allocate a temporary buffer for the page.
-	var buf []byte
-	if count == 1 {
-		buf = db.pagePool.Get().([]byte)
-	} else {
-		buf = make([]byte, count*db.pageSize)
-	}
-	p := (*page)(unsafe.Pointer(&buf[0]))
-	p.overflow = uint32(count - 1)
-
-	// Use pages from the freelist if they are available.
-	if p.id = db.freelist.allocate(count); p.id != 0 {
-		return p, nil
-	}
-
-	// Resize mmap() if we're at the end.
-	p.id = db.rwtx.meta.pgid
-	var minsz = int((p.id+pgid(count))+1) * db.pageSize
-	if minsz >= db.datasz {
-		if err := db.mmap(minsz); err != nil {
-			return nil, fmt.Errorf("mmap allocate error: %s", err)
-		}
-	}
-
-	// Move the page id high water mark.
-	db.rwtx.meta.pgid += pgid(count)
-
-	return p, nil
-}
-
-// grow grows the size of the database to the given sz.
-func (db *DB) grow(sz int) error {
-	// Ignore if the new size does not exceed the current file size.
-	if sz <= db.filesz {
-		return nil
-	}
-
-	// If the data is smaller than the alloc size then only allocate what's needed.
-	// Once it goes over the allocation size then allocate in chunks.
-	if db.datasz < db.AllocSize {
-		sz = db.datasz
-	} else {
-		sz += db.AllocSize
-	}
-
-	// Truncate and fsync to ensure file size metadata is flushed.
-	// https://github.com/boltdb/bolt/issues/284
-	if !db.NoGrowSync && !db.readOnly {
-		if runtime.GOOS != "windows" {
-			if err := db.file.Truncate(int64(sz)); err != nil {
-				return fmt.Errorf("file resize error: %s", err)
-			}
-		}
-		if err := db.file.Sync(); err != nil {
-			return fmt.Errorf("file sync error: %s", err)
-		}
-	}
-
-	db.filesz = sz
-	return nil
-}
-
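-// IsReadOnly returns whether the database was opened in read-only mode.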
-func (db *DB) IsReadOnly() bool {
-	return db.readOnly
-}
-
-// Options represents the options that can be set when opening a database.
-type Options struct {
-	// Timeout is the amount of time to wait to obtain a file lock.
-	// When set to zero it will wait indefinitely. This option is only
-	// available on Darwin and Linux.
-	Timeout time.Duration
-
-	// Sets the DB.NoGrowSync flag before memory mapping the file.
-	NoGrowSync bool
-
-	// Open database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB) to
-	// grab a shared lock (UNIX).
-	ReadOnly bool
-
-	// Sets the DB.MmapFlags flag before memory mapping the file.
-	MmapFlags int
-
-	// InitialMmapSize is the initial mmap size of the database
-	// in bytes. Read transactions won't block write transactions
-	// if InitialMmapSize is large enough to hold the database mmap
-	// size. (See DB.Begin for more information.)
-	//
-	// If <=0, the initial map size is 0.
-	// If InitialMmapSize is smaller than the previous database size,
-	// it has no effect.
-	InitialMmapSize int
-}
-
-// DefaultOptions represents the options used if nil options are passed into Open().
-// No timeout is used, which will cause Bolt to wait indefinitely for a lock.
-var DefaultOptions = &Options{
-	Timeout:    0,
-	NoGrowSync: false,
-}
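
A sketch of passing Options to Open (declared elsewhere in this package; uses the time package) so a locked data file fails with ErrTimeout instead of blocking forever:

func openWithTimeout(path string) (*bolt.DB, error) {
	// Fail after one second on the file lock rather than waiting
	// indefinitely, which is the DefaultOptions behaviour.
	return bolt.Open(path, 0600, &bolt.Options{Timeout: time.Second})
}
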
-
-// Stats represents statistics about the database.
-type Stats struct {
-	// Freelist stats
-	FreePageN     int // total number of free pages on the freelist
-	PendingPageN  int // total number of pending pages on the freelist
-	FreeAlloc     int // total bytes allocated in free pages
-	FreelistInuse int // total bytes used by the freelist
-
-	// Transaction stats
-	TxN     int // total number of started read transactions
-	OpenTxN int // number of currently open read transactions
-
-	TxStats TxStats // global, ongoing stats.
-}
-
-// Sub calculates and returns the difference between two sets of database stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *Stats) Sub(other *Stats) Stats {
-	if other == nil {
-		return *s
-	}
-	var diff Stats
-	diff.FreePageN = s.FreePageN
-	diff.PendingPageN = s.PendingPageN
-	diff.FreeAlloc = s.FreeAlloc
-	diff.FreelistInuse = s.FreelistInuse
-	diff.TxN = s.TxN - other.TxN
-	diff.TxStats = s.TxStats.Sub(&other.TxStats)
-	return diff
-}
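
A sketch of the intended Sub pattern, assuming the time package: snapshot the stats, wait, then diff the newer snapshot against the older one:

func txDuring(db *bolt.DB, d time.Duration) int {
	prev := db.Stats()
	time.Sleep(d)
	cur := db.Stats()
	diff := cur.Sub(&prev)
	return diff.TxN // read transactions started within the window
}
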
-
-func (s *Stats) add(other *Stats) {
-	s.TxStats.add(&other.TxStats)
-}
-
-type Info struct {
-	Data     uintptr
-	PageSize int
-}
-
-type meta struct {
-	magic    uint32
-	version  uint32
-	pageSize uint32
-	flags    uint32
-	root     bucket
-	freelist pgid
-	pgid     pgid
-	txid     txid
-	checksum uint64
-}
-
-// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
-func (m *meta) validate() error {
-	if m.magic != magic {
-		return ErrInvalid
-	} else if m.version != version {
-		return ErrVersionMismatch
-	} else if m.checksum != 0 && m.checksum != m.sum64() {
-		return ErrChecksum
-	}
-	return nil
-}
-
-// copy copies one meta object to another.
-func (m *meta) copy(dest *meta) {
-	*dest = *m
-}
-
-// write writes the meta onto a page.
-func (m *meta) write(p *page) {
-	if m.root.root >= m.pgid {
-		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
-	} else if m.freelist >= m.pgid {
-		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
-	}
-
-	// Page id is either going to be 0 or 1, which we can determine by the transaction ID.
-	p.id = pgid(m.txid % 2)
-	p.flags |= metaPageFlag
-
-	// Calculate the checksum.
-	m.checksum = m.sum64()
-
-	m.copy(p.meta())
-}
-
-// sum64 generates the checksum for the meta.
-func (m *meta) sum64() uint64 {
-	var h = fnv.New64a()
-	_, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
-	return h.Sum64()
-}
-
-// _assert will panic with a given formatted message if the given condition is false.
-func _assert(condition bool, msg string, v ...interface{}) {
-	if !condition {
-		panic(fmt.Sprintf("assertion failed: "+msg, v...))
-	}
-}
-
-func warn(v ...interface{})              { fmt.Fprintln(os.Stderr, v...) }
-func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
-
-func printstack() {
-	stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
-	fmt.Fprintln(os.Stderr, stack)
-}

+ 0 - 44
vendor/github.com/boltdb/bolt/doc.go

@@ -1,44 +0,0 @@
-/*
-Package bolt implements a low-level key/value store in pure Go. It supports
-fully serializable transactions, ACID semantics, and lock-free MVCC with
-multiple readers and a single writer. Bolt can be used for projects that
-want a simple data store without the need to add large dependencies such as
-Postgres or MySQL.
-
-Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
-optimized for fast read access and does not require recovery in the event of a
-system crash. Transactions which have not finished committing will simply be
-rolled back in the event of a crash.
-
-The design of Bolt is based on Howard Chu's LMDB database project.
-
-Bolt currently works on Windows, Mac OS X, and Linux.
-
-
-Basics
-
-There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
-a collection of buckets and is represented by a single file on disk. A bucket is
-a collection of unique keys that are associated with values.
-
-Transactions provide either read-only or read-write access to the database.
-Read-only transactions can retrieve key/value pairs and can use Cursors to
-iterate over the dataset sequentially. Read-write transactions can create and
-delete buckets and can insert and remove keys. Only one read-write transaction
-is allowed at a time.
-
-
-Caveats
-
-The database uses a read-only, memory-mapped data file to ensure that
-applications cannot corrupt the database; however, this means that keys and
-values returned from Bolt cannot be changed. Writing to a read-only byte
-slice will cause Go to panic.
-
-Keys and values retrieved from the database are only valid for the life of
-the transaction. When used outside the transaction, these byte slices can
-point to different data or can point to invalid memory which will cause a panic.
-
-
-*/
-package bolt

+ 0 - 71
vendor/github.com/boltdb/bolt/errors.go

@@ -1,71 +0,0 @@
-package bolt
-
-import "errors"
-
-// These errors can be returned when opening or calling methods on a DB.
-var (
-	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
-	// is opened or after it is closed.
-	ErrDatabaseNotOpen = errors.New("database not open")
-
-	// ErrDatabaseOpen is returned when opening a database that is
-	// already open.
-	ErrDatabaseOpen = errors.New("database already open")
-
-	// ErrInvalid is returned when both meta pages on a database are invalid.
-	// This typically occurs when a file is not a bolt database.
-	ErrInvalid = errors.New("invalid database")
-
-	// ErrVersionMismatch is returned when the data file was created with a
-	// different version of Bolt.
-	ErrVersionMismatch = errors.New("version mismatch")
-
-	// ErrChecksum is returned when either meta page checksum does not match.
-	ErrChecksum = errors.New("checksum error")
-
-	// ErrTimeout is returned when a database cannot obtain an exclusive lock
-	// on the data file after the timeout passed to Open().
-	ErrTimeout = errors.New("timeout")
-)
-
-// These errors can occur when beginning or committing a Tx.
-var (
-	// ErrTxNotWritable is returned when performing a write operation on a
-	// read-only transaction.
-	ErrTxNotWritable = errors.New("tx not writable")
-
-	// ErrTxClosed is returned when committing or rolling back a transaction
-	// that has already been committed or rolled back.
-	ErrTxClosed = errors.New("tx closed")
-
-	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
-	// read-only database.
-	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
-)
-
-// These errors can occur when putting or deleting a value or a bucket.
-var (
-	// ErrBucketNotFound is returned when trying to access a bucket that has
-	// not been created yet.
-	ErrBucketNotFound = errors.New("bucket not found")
-
-	// ErrBucketExists is returned when creating a bucket that already exists.
-	ErrBucketExists = errors.New("bucket already exists")
-
-	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
-	ErrBucketNameRequired = errors.New("bucket name required")
-
-	// ErrKeyRequired is returned when inserting a zero-length key.
-	ErrKeyRequired = errors.New("key required")
-
-	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
-	ErrKeyTooLarge = errors.New("key too large")
-
-	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
-	ErrValueTooLarge = errors.New("value too large")
-
-	// ErrIncompatibleValue is returned when trying to create or delete a bucket
-	// on an existing non-bucket key or when trying to create or delete a
-	// non-bucket key on an existing bucket key.
-	ErrIncompatibleValue = errors.New("incompatible value")
-)
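
A sketch of branching on one of these sentinel errors; the helper name is illustrative:

func ensureBucket(db *bolt.DB, name []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket(name)
		if err == bolt.ErrBucketExists {
			return nil // already created; treat as success
		}
		return err
	})
}
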

+ 0 - 248
vendor/github.com/boltdb/bolt/freelist.go

@@ -1,248 +0,0 @@
-package bolt
-
-import (
-	"fmt"
-	"sort"
-	"unsafe"
-)
-
-// freelist represents a list of all pages that are available for allocation.
-// It also tracks pages that have been freed but are still in use by open transactions.
-type freelist struct {
-	ids     []pgid          // all free and available page ids.
-	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
-	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
-}
-
-// newFreelist returns an empty, initialized freelist.
-func newFreelist() *freelist {
-	return &freelist{
-		pending: make(map[txid][]pgid),
-		cache:   make(map[pgid]bool),
-	}
-}
-
-// size returns the size of the page after serialization.
-func (f *freelist) size() int {
-	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
-}
-
-// count returns count of pages on the freelist
-func (f *freelist) count() int {
-	return f.free_count() + f.pending_count()
-}
-
-// free_count returns count of free pages
-func (f *freelist) free_count() int {
-	return len(f.ids)
-}
-
-// pending_count returns count of pending pages
-func (f *freelist) pending_count() int {
-	var count int
-	for _, list := range f.pending {
-		count += len(list)
-	}
-	return count
-}
-
-// all returns a list of all free ids and all pending ids in one sorted list.
-func (f *freelist) all() []pgid {
-	m := make(pgids, 0)
-
-	for _, list := range f.pending {
-		m = append(m, list...)
-	}
-
-	sort.Sort(m)
-	return pgids(f.ids).merge(m)
-}
-
-// allocate returns the starting page id of a contiguous list of pages of a given size.
-// If a contiguous block cannot be found then 0 is returned.
-func (f *freelist) allocate(n int) pgid {
-	if len(f.ids) == 0 {
-		return 0
-	}
-
-	var initial, previd pgid
-	for i, id := range f.ids {
-		if id <= 1 {
-			panic(fmt.Sprintf("invalid page allocation: %d", id))
-		}
-
-		// Reset initial page if this is not contiguous.
-		if previd == 0 || id-previd != 1 {
-			initial = id
-		}
-
-		// If we found a contiguous block then remove it and return it.
-		if (id-initial)+1 == pgid(n) {
-			// If we're allocating off the beginning then take the fast path
-			// and just adjust the existing slice. This will use extra memory
-			// temporarily but the append() in free() will realloc the slice
-			// as necessary.
-			if (i + 1) == n {
-				f.ids = f.ids[i+1:]
-			} else {
-				copy(f.ids[i-n+1:], f.ids[i+1:])
-				f.ids = f.ids[:len(f.ids)-n]
-			}
-
-			// Remove from the free cache.
-			for i := pgid(0); i < pgid(n); i++ {
-				delete(f.cache, initial+i)
-			}
-
-			return initial
-		}
-
-		previd = id
-	}
-	return 0
-}
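
For example, with f.ids = [3 4 5 9], allocate(3) finds the contiguous run 3,4,5, removes those ids from the list (leaving [9]) and returns 3, while a subsequent allocate(2) returns 0 because no two remaining ids are adjacent.
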
-
-// free releases a page and its overflow for a given transaction id.
-// If the page is already free then a panic will occur.
-func (f *freelist) free(txid txid, p *page) {
-	if p.id <= 1 {
-		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
-	}
-
-	// Free page and all its overflow pages.
-	var ids = f.pending[txid]
-	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
-		// Verify that page is not already free.
-		if f.cache[id] {
-			panic(fmt.Sprintf("page %d already freed", id))
-		}
-
-		// Add to the freelist and cache.
-		ids = append(ids, id)
-		f.cache[id] = true
-	}
-	f.pending[txid] = ids
-}
-
-// release moves all page ids for a transaction id (or older) to the freelist.
-func (f *freelist) release(txid txid) {
-	m := make(pgids, 0)
-	for tid, ids := range f.pending {
-		if tid <= txid {
-			// Move transaction's pending pages to the available freelist.
-			// Don't remove from the cache since the page is still free.
-			m = append(m, ids...)
-			delete(f.pending, tid)
-		}
-	}
-	sort.Sort(m)
-	f.ids = pgids(f.ids).merge(m)
-}
-
-// rollback removes the pages from a given pending tx.
-func (f *freelist) rollback(txid txid) {
-	// Remove page ids from cache.
-	for _, id := range f.pending[txid] {
-		delete(f.cache, id)
-	}
-
-	// Remove pages from pending list.
-	delete(f.pending, txid)
-}
-
-// freed returns whether a given page is in the free list.
-func (f *freelist) freed(pgid pgid) bool {
-	return f.cache[pgid]
-}
-
-// read initializes the freelist from a freelist page.
-func (f *freelist) read(p *page) {
-	// If the page.count is at the max uint16 value (64k) then it's considered
-	// an overflow and the size of the freelist is stored as the first element.
-	idx, count := 0, int(p.count)
-	if count == 0xFFFF {
-		idx = 1
-		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
-	}
-
-	// Copy the list of page ids from the freelist.
-	if count == 0 {
-		f.ids = nil
-	} else {
-		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
-		f.ids = make([]pgid, len(ids))
-		copy(f.ids, ids)
-
-		// Make sure they're sorted.
-		sort.Sort(pgids(f.ids))
-	}
-
-	// Rebuild the page cache.
-	f.reindex()
-}
-
-// write writes the page ids onto a freelist page. All free and pending ids are
-// saved to disk since in the event of a program crash, all pending ids will
-// become free.
-func (f *freelist) write(p *page) error {
-	// Combine the old free pgids and pgids waiting on an open transaction.
-	ids := f.all()
-
-	// Update the header flag.
-	p.flags |= freelistPageFlag
-
-	// The page.count can only hold up to 64k elements so if we overflow that
-	// number then we handle it by putting the size in the first element.
-	if len(ids) == 0 {
-		p.count = uint16(len(ids))
-	} else if len(ids) < 0xFFFF {
-		p.count = uint16(len(ids))
-		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
-	} else {
-		p.count = 0xFFFF
-		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
-		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
-	}
-
-	return nil
-}
-
-// reload reads the freelist from a page and filters out pending items.
-func (f *freelist) reload(p *page) {
-	f.read(p)
-
-	// Build a cache of only pending pages.
-	pcache := make(map[pgid]bool)
-	for _, pendingIDs := range f.pending {
-		for _, pendingID := range pendingIDs {
-			pcache[pendingID] = true
-		}
-	}
-
-	// Check each page in the freelist and build a new available freelist
-	// with any pages not in the pending lists.
-	var a []pgid
-	for _, id := range f.ids {
-		if !pcache[id] {
-			a = append(a, id)
-		}
-	}
-	f.ids = a
-
-	// Once the available list is rebuilt then rebuild the free cache so that
-	// it includes the available and pending free pages.
-	f.reindex()
-}
-
-// reindex rebuilds the free cache based on available and pending free lists.
-func (f *freelist) reindex() {
-	f.cache = make(map[pgid]bool, len(f.ids))
-	for _, id := range f.ids {
-		f.cache[id] = true
-	}
-	for _, pendingIDs := range f.pending {
-		for _, pendingID := range pendingIDs {
-			f.cache[pendingID] = true
-		}
-	}
-}

+ 0 - 604
vendor/github.com/boltdb/bolt/node.go

@@ -1,604 +0,0 @@
-package bolt
-
-import (
-	"bytes"
-	"fmt"
-	"sort"
-	"unsafe"
-)
-
-// node represents an in-memory, deserialized page.
-type node struct {
-	bucket     *Bucket
-	isLeaf     bool
-	unbalanced bool
-	spilled    bool
-	key        []byte
-	pgid       pgid
-	parent     *node
-	children   nodes
-	inodes     inodes
-}
-
-// root returns the top-level node this node is attached to.
-func (n *node) root() *node {
-	if n.parent == nil {
-		return n
-	}
-	return n.parent.root()
-}
-
-// minKeys returns the minimum number of inodes this node should have.
-func (n *node) minKeys() int {
-	if n.isLeaf {
-		return 1
-	}
-	return 2
-}
-
-// size returns the size of the node after serialization.
-func (n *node) size() int {
-	sz, elsz := pageHeaderSize, n.pageElementSize()
-	for i := 0; i < len(n.inodes); i++ {
-		item := &n.inodes[i]
-		sz += elsz + len(item.key) + len(item.value)
-	}
-	return sz
-}
-
-// sizeLessThan returns true if the node is less than a given size.
-// This is an optimization to avoid calculating a large node when we only need
-// to know if it fits inside a certain page size.
-func (n *node) sizeLessThan(v int) bool {
-	sz, elsz := pageHeaderSize, n.pageElementSize()
-	for i := 0; i < len(n.inodes); i++ {
-		item := &n.inodes[i]
-		sz += elsz + len(item.key) + len(item.value)
-		if sz >= v {
-			return false
-		}
-	}
-	return true
-}
-
-// pageElementSize returns the size of each page element based on the type of node.
-func (n *node) pageElementSize() int {
-	if n.isLeaf {
-		return leafPageElementSize
-	}
-	return branchPageElementSize
-}
-
-// childAt returns the child node at a given index.
-func (n *node) childAt(index int) *node {
-	if n.isLeaf {
-		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
-	}
-	return n.bucket.node(n.inodes[index].pgid, n)
-}
-
-// childIndex returns the index of a given child node.
-func (n *node) childIndex(child *node) int {
-	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
-	return index
-}
-
-// numChildren returns the number of children.
-func (n *node) numChildren() int {
-	return len(n.inodes)
-}
-
-// nextSibling returns the next node with the same parent.
-func (n *node) nextSibling() *node {
-	if n.parent == nil {
-		return nil
-	}
-	index := n.parent.childIndex(n)
-	if index >= n.parent.numChildren()-1 {
-		return nil
-	}
-	return n.parent.childAt(index + 1)
-}
-
-// prevSibling returns the previous node with the same parent.
-func (n *node) prevSibling() *node {
-	if n.parent == nil {
-		return nil
-	}
-	index := n.parent.childIndex(n)
-	if index == 0 {
-		return nil
-	}
-	return n.parent.childAt(index - 1)
-}
-
-// put inserts a key/value.
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
-	if pgid >= n.bucket.tx.meta.pgid {
-		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
-	} else if len(oldKey) <= 0 {
-		panic("put: zero-length old key")
-	} else if len(newKey) <= 0 {
-		panic("put: zero-length new key")
-	}
-
-	// Find insertion index.
-	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
-
-	// Add capacity and shift nodes if we don't have an exact match and need to insert.
-	exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
-	if !exact {
-		n.inodes = append(n.inodes, inode{})
-		copy(n.inodes[index+1:], n.inodes[index:])
-	}
-
-	inode := &n.inodes[index]
-	inode.flags = flags
-	inode.key = newKey
-	inode.value = value
-	inode.pgid = pgid
-	_assert(len(inode.key) > 0, "put: zero-length inode key")
-}
-
-// del removes a key from the node.
-func (n *node) del(key []byte) {
-	// Find index of key.
-	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
-
-	// Exit if the key isn't found.
-	if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
-		return
-	}
-
-	// Delete inode from the node.
-	n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
-
-	// Mark the node as needing rebalancing.
-	n.unbalanced = true
-}
-
-// read initializes the node from a page.
-func (n *node) read(p *page) {
-	n.pgid = p.id
-	n.isLeaf = ((p.flags & leafPageFlag) != 0)
-	n.inodes = make(inodes, int(p.count))
-
-	for i := 0; i < int(p.count); i++ {
-		inode := &n.inodes[i]
-		if n.isLeaf {
-			elem := p.leafPageElement(uint16(i))
-			inode.flags = elem.flags
-			inode.key = elem.key()
-			inode.value = elem.value()
-		} else {
-			elem := p.branchPageElement(uint16(i))
-			inode.pgid = elem.pgid
-			inode.key = elem.key()
-		}
-		_assert(len(inode.key) > 0, "read: zero-length inode key")
-	}
-
-	// Save first key so we can find the node in the parent when we spill.
-	if len(n.inodes) > 0 {
-		n.key = n.inodes[0].key
-		_assert(len(n.key) > 0, "read: zero-length node key")
-	} else {
-		n.key = nil
-	}
-}
-
-// write writes the items onto one or more pages.
-func (n *node) write(p *page) {
-	// Initialize page.
-	if n.isLeaf {
-		p.flags |= leafPageFlag
-	} else {
-		p.flags |= branchPageFlag
-	}
-
-	if len(n.inodes) >= 0xFFFF {
-		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
-	}
-	p.count = uint16(len(n.inodes))
-
-	// Stop here if there are no items to write.
-	if p.count == 0 {
-		return
-	}
-
-	// Loop over each item and write it to the page.
-	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
-	for i, item := range n.inodes {
-		_assert(len(item.key) > 0, "write: zero-length inode key")
-
-		// Write the page element.
-		if n.isLeaf {
-			elem := p.leafPageElement(uint16(i))
-			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
-			elem.flags = item.flags
-			elem.ksize = uint32(len(item.key))
-			elem.vsize = uint32(len(item.value))
-		} else {
-			elem := p.branchPageElement(uint16(i))
-			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
-			elem.ksize = uint32(len(item.key))
-			elem.pgid = item.pgid
-			_assert(elem.pgid != p.id, "write: circular dependency occurred")
-		}
-
-		// If the length of key+value is larger than the max allocation size
-		// then we need to reallocate the byte array pointer.
-		//
-		// See: https://github.com/boltdb/bolt/pull/335
-		klen, vlen := len(item.key), len(item.value)
-		if len(b) < klen+vlen {
-			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
-		}
-
-		// Write data for the element to the end of the page.
-		copy(b[0:], item.key)
-		b = b[klen:]
-		copy(b[0:], item.value)
-		b = b[vlen:]
-	}
-
-	// DEBUG ONLY: n.dump()
-}
-
-// split breaks up a node into multiple smaller nodes, if appropriate.
-// This should only be called from the spill() function.
-func (n *node) split(pageSize int) []*node {
-	var nodes []*node
-
-	node := n
-	for {
-		// Split node into two.
-		a, b := node.splitTwo(pageSize)
-		nodes = append(nodes, a)
-
-		// If we can't split then exit the loop.
-		if b == nil {
-			break
-		}
-
-		// Set node to b so it gets split on the next iteration.
-		node = b
-	}
-
-	return nodes
-}
-
-// splitTwo breaks up a node into two smaller nodes, if appropriate.
-// This should only be called from the split() function.
-func (n *node) splitTwo(pageSize int) (*node, *node) {
-	// Ignore the split if the page doesn't have at least enough nodes for
-	// two pages or if the nodes can fit in a single page.
-	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
-		return n, nil
-	}
-
-	// Determine the threshold before starting a new node.
-	var fillPercent = n.bucket.FillPercent
-	if fillPercent < minFillPercent {
-		fillPercent = minFillPercent
-	} else if fillPercent > maxFillPercent {
-		fillPercent = maxFillPercent
-	}
-	threshold := int(float64(pageSize) * fillPercent)
-
-	// Determine split position and sizes of the two pages.
-	splitIndex, _ := n.splitIndex(threshold)
-
-	// Split node into two separate nodes.
-	// If there's no parent then we'll need to create one.
-	if n.parent == nil {
-		n.parent = &node{bucket: n.bucket, children: []*node{n}}
-	}
-
-	// Create a new node and add it to the parent.
-	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
-	n.parent.children = append(n.parent.children, next)
-
-	// Split inodes across two nodes.
-	next.inodes = n.inodes[splitIndex:]
-	n.inodes = n.inodes[:splitIndex]
-
-	// Update the statistics.
-	n.bucket.tx.stats.Split++
-
-	return n, next
-}
-
-// splitIndex finds the position where a page will fill a given threshold.
-// It returns the index as well as the size of the first page.
-// This is only called from split().
-func (n *node) splitIndex(threshold int) (index, sz int) {
-	sz = pageHeaderSize
-
-	// Loop until we only have the minimum number of keys required for the second page.
-	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
-		index = i
-		inode := n.inodes[i]
-		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
-
-		// If we have at least the minimum number of keys and adding another
-		// node would put us over the threshold then exit and return.
-		if i >= minKeysPerPage && sz+elsize > threshold {
-			break
-		}
-
-		// Add the element size to the total size.
-		sz += elsize
-	}
-
-	return
-}
-
-// spill writes the nodes to dirty pages and splits nodes as it goes.
-// Returns an error if dirty pages cannot be allocated.
-func (n *node) spill() error {
-	var tx = n.bucket.tx
-	if n.spilled {
-		return nil
-	}
-
-	// Spill child nodes first. Child nodes can materialize sibling nodes in
-	// the case of split-merge so we cannot use a range loop. We have to check
-	// the children size on every loop iteration.
-	sort.Sort(n.children)
-	for i := 0; i < len(n.children); i++ {
-		if err := n.children[i].spill(); err != nil {
-			return err
-		}
-	}
-
-	// We no longer need the child list because it's only used for spill tracking.
-	n.children = nil
-
-	// Split nodes into appropriate sizes. The first node will always be n.
-	var nodes = n.split(tx.db.pageSize)
-	for _, node := range nodes {
-		// Add node's page to the freelist if it's not new.
-		if node.pgid > 0 {
-			tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
-			node.pgid = 0
-		}
-
-		// Allocate contiguous space for the node.
-		p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
-		if err != nil {
-			return err
-		}
-
-		// Write the node.
-		if p.id >= tx.meta.pgid {
-			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
-		}
-		node.pgid = p.id
-		node.write(p)
-		node.spilled = true
-
-		// Insert into parent inodes.
-		if node.parent != nil {
-			var key = node.key
-			if key == nil {
-				key = node.inodes[0].key
-			}
-
-			node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
-			node.key = node.inodes[0].key
-			_assert(len(node.key) > 0, "spill: zero-length node key")
-		}
-
-		// Update the statistics.
-		tx.stats.Spill++
-	}
-
-	// If the root node split and created a new root then we need to spill that
-	// as well. We'll clear out the children to make sure it doesn't try to respill.
-	if n.parent != nil && n.parent.pgid == 0 {
-		n.children = nil
-		return n.parent.spill()
-	}
-
-	return nil
-}
-
-// rebalance attempts to combine the node with sibling nodes if the node fill
-// size is below a threshold or if there are not enough keys.
-func (n *node) rebalance() {
-	if !n.unbalanced {
-		return
-	}
-	n.unbalanced = false
-
-	// Update statistics.
-	n.bucket.tx.stats.Rebalance++
-
-	// Ignore if node is above threshold (25%) and has enough keys.
-	var threshold = n.bucket.tx.db.pageSize / 4
-	if n.size() > threshold && len(n.inodes) > n.minKeys() {
-		return
-	}
-
-	// Root node has special handling.
-	if n.parent == nil {
-		// If root node is a branch and only has one node then collapse it.
-		if !n.isLeaf && len(n.inodes) == 1 {
-			// Move root's child up.
-			child := n.bucket.node(n.inodes[0].pgid, n)
-			n.isLeaf = child.isLeaf
-			n.inodes = child.inodes[:]
-			n.children = child.children
-
-			// Reparent all child nodes being moved.
-			for _, inode := range n.inodes {
-				if child, ok := n.bucket.nodes[inode.pgid]; ok {
-					child.parent = n
-				}
-			}
-
-			// Remove old child.
-			child.parent = nil
-			delete(n.bucket.nodes, child.pgid)
-			child.free()
-		}
-
-		return
-	}
-
-	// If node has no keys then just remove it.
-	if n.numChildren() == 0 {
-		n.parent.del(n.key)
-		n.parent.removeChild(n)
-		delete(n.bucket.nodes, n.pgid)
-		n.free()
-		n.parent.rebalance()
-		return
-	}
-
-	_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
-
-	// Destination node is right sibling if idx == 0, otherwise left sibling.
-	var target *node
-	var useNextSibling = (n.parent.childIndex(n) == 0)
-	if useNextSibling {
-		target = n.nextSibling()
-	} else {
-		target = n.prevSibling()
-	}
-
-	// Merge with the target node: when using the next sibling, move its inodes
-	// into this node; otherwise move this node's inodes into the target.
-	if useNextSibling {
-		// Reparent all child nodes being moved.
-		for _, inode := range target.inodes {
-			if child, ok := n.bucket.nodes[inode.pgid]; ok {
-				child.parent.removeChild(child)
-				child.parent = n
-				child.parent.children = append(child.parent.children, child)
-			}
-		}
-
-		// Copy over inodes from target and remove target.
-		n.inodes = append(n.inodes, target.inodes...)
-		n.parent.del(target.key)
-		n.parent.removeChild(target)
-		delete(n.bucket.nodes, target.pgid)
-		target.free()
-	} else {
-		// Reparent all child nodes being moved.
-		for _, inode := range n.inodes {
-			if child, ok := n.bucket.nodes[inode.pgid]; ok {
-				child.parent.removeChild(child)
-				child.parent = target
-				child.parent.children = append(child.parent.children, child)
-			}
-		}
-
-		// Copy over inodes to target and remove node.
-		target.inodes = append(target.inodes, n.inodes...)
-		n.parent.del(n.key)
-		n.parent.removeChild(n)
-		delete(n.bucket.nodes, n.pgid)
-		n.free()
-	}
-
-	// Either this node or the target node was deleted from the parent so rebalance it.
-	n.parent.rebalance()
-}
-
-// removeChild removes a node from the list of in-memory children.
-// This does not affect the inodes.
-func (n *node) removeChild(target *node) {
-	for i, child := range n.children {
-		if child == target {
-			n.children = append(n.children[:i], n.children[i+1:]...)
-			return
-		}
-	}
-}
-
-// dereference causes the node to copy all its inode key/value references to heap memory.
-// This is required when the mmap is reallocated so inodes are not pointing to stale data.
-func (n *node) dereference() {
-	if n.key != nil {
-		key := make([]byte, len(n.key))
-		copy(key, n.key)
-		n.key = key
-		_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
-	}
-
-	for i := range n.inodes {
-		inode := &n.inodes[i]
-
-		key := make([]byte, len(inode.key))
-		copy(key, inode.key)
-		inode.key = key
-		_assert(len(inode.key) > 0, "dereference: zero-length inode key")
-
-		value := make([]byte, len(inode.value))
-		copy(value, inode.value)
-		inode.value = value
-	}
-
-	// Recursively dereference children.
-	for _, child := range n.children {
-		child.dereference()
-	}
-
-	// Update statistics.
-	n.bucket.tx.stats.NodeDeref++
-}
-
-// free adds the node's underlying page to the freelist.
-func (n *node) free() {
-	if n.pgid != 0 {
-		n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
-		n.pgid = 0
-	}
-}
-
-// dump writes the contents of the node to STDERR for debugging purposes.
-/*
-func (n *node) dump() {
-	// Write node header.
-	var typ = "branch"
-	if n.isLeaf {
-		typ = "leaf"
-	}
-	warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
-
-	// Write out abbreviated version of each item.
-	for _, item := range n.inodes {
-		if n.isLeaf {
-			if item.flags&bucketLeafFlag != 0 {
-				bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
-				warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
-			} else {
-				warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
-			}
-		} else {
-			warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
-		}
-	}
-	warn("")
-}
-*/
-
-type nodes []*node
-
-func (s nodes) Len() int           { return len(s) }
-func (s nodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
-
-// inode represents an internal node inside a node.
-// It can be used to point to elements in a page or point
-// to an element which hasn't been added to a page yet.
-type inode struct {
-	flags uint32
-	pgid  pgid
-	key   []byte
-	value []byte
-}
-
-type inodes []inode

+ 0 - 178
vendor/github.com/boltdb/bolt/page.go

@@ -1,178 +0,0 @@
-package bolt
-
-import (
-	"fmt"
-	"os"
-	"sort"
-	"unsafe"
-)
-
-const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
-
-const minKeysPerPage = 2
-
-const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
-const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
-
-const (
-	branchPageFlag   = 0x01
-	leafPageFlag     = 0x02
-	metaPageFlag     = 0x04
-	freelistPageFlag = 0x10
-)
-
-const (
-	bucketLeafFlag = 0x01
-)
-
-type pgid uint64
-
-type page struct {
-	id       pgid
-	flags    uint16
-	count    uint16
-	overflow uint32
-	ptr      uintptr
-}
-
-// typ returns a human readable page type string used for debugging.
-func (p *page) typ() string {
-	if (p.flags & branchPageFlag) != 0 {
-		return "branch"
-	} else if (p.flags & leafPageFlag) != 0 {
-		return "leaf"
-	} else if (p.flags & metaPageFlag) != 0 {
-		return "meta"
-	} else if (p.flags & freelistPageFlag) != 0 {
-		return "freelist"
-	}
-	return fmt.Sprintf("unknown<%02x>", p.flags)
-}
-
-// meta returns a pointer to the metadata section of the page.
-func (p *page) meta() *meta {
-	return (*meta)(unsafe.Pointer(&p.ptr))
-}
-
-// leafPageElement retrieves the leaf node by index
-func (p *page) leafPageElement(index uint16) *leafPageElement {
-	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
-	return n
-}
-
-// leafPageElements retrieves a list of leaf nodes.
-func (p *page) leafPageElements() []leafPageElement {
-	if p.count == 0 {
-		return nil
-	}
-	return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
-}
-
-// branchPageElement retrieves the branch node by index
-func (p *page) branchPageElement(index uint16) *branchPageElement {
-	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
-}
-
-// branchPageElements retrieves a list of branch nodes.
-func (p *page) branchPageElements() []branchPageElement {
-	if p.count == 0 {
-		return nil
-	}
-	return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
-}
-
-// hexdump writes n bytes of the page to STDERR as hex output.
-func (p *page) hexdump(n int) {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
-	fmt.Fprintf(os.Stderr, "%x\n", buf)
-}
-
-type pages []*page
-
-func (s pages) Len() int           { return len(s) }
-func (s pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
-
-// branchPageElement represents a node on a branch page.
-type branchPageElement struct {
-	pos   uint32
-	ksize uint32
-	pgid  pgid
-}
-
-// key returns a byte slice of the node key.
-func (n *branchPageElement) key() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
-}
-
-// leafPageElement represents a node on a leaf page.
-type leafPageElement struct {
-	flags uint32
-	pos   uint32
-	ksize uint32
-	vsize uint32
-}
-
-// key returns a byte slice of the node key.
-func (n *leafPageElement) key() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
-}
-
-// value returns a byte slice of the node value.
-func (n *leafPageElement) value() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
-}
-
-// PageInfo represents human readable information about a page.
-type PageInfo struct {
-	ID            int
-	Type          string
-	Count         int
-	OverflowCount int
-}
-
-type pgids []pgid
-
-func (s pgids) Len() int           { return len(s) }
-func (s pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
-
-// merge returns the sorted union of a and b.
-func (a pgids) merge(b pgids) pgids {
-	// Return the opposite slice if one is nil.
-	if len(a) == 0 {
-		return b
-	} else if len(b) == 0 {
-		return a
-	}
-
-	// Create a list to hold all elements from both lists.
-	merged := make(pgids, 0, len(a)+len(b))
-
-	// Assign lead to the slice with a lower starting value, follow to the higher value.
-	lead, follow := a, b
-	if b[0] < a[0] {
-		lead, follow = b, a
-	}
-
-	// Continue while there are elements in the lead.
-	for len(lead) > 0 {
-		// Merge largest prefix of lead that is ahead of follow[0].
-		// Merge the largest prefix of lead that does not exceed follow[0].
-		merged = append(merged, lead[:n]...)
-		if n >= len(lead) {
-			break
-		}
-
-		// Swap lead and follow.
-		lead, follow = follow, lead[n:]
-	}
-
-	// Append what's left in follow.
-	merged = append(merged, follow...)
-
-	return merged
-}
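
For example, merge([1 4 6], [2 3 5 7]) appends 1 from the lead, swaps roles to copy 2 and 3, and keeps alternating until one list is exhausted, yielding [1 2 3 4 5 6 7].
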

+ 0 - 682
vendor/github.com/boltdb/bolt/tx.go

@@ -1,682 +0,0 @@
-package bolt
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"sort"
-	"strings"
-	"time"
-	"unsafe"
-)
-
-// txid represents the internal transaction identifier.
-type txid uint64
-
-// Tx represents a read-only or read/write transaction on the database.
-// Read-only transactions can be used for retrieving values for keys and creating cursors.
-// Read/write transactions can create and remove buckets and create and remove keys.
-//
-// IMPORTANT: You must commit or rollback transactions when you are done with
-// them. Pages can not be reclaimed by the writer until no more transactions
-// are using them. A long running read transaction can cause the database to
-// quickly grow.
-type Tx struct {
-	writable       bool
-	managed        bool
-	db             *DB
-	meta           *meta
-	root           Bucket
-	pages          map[pgid]*page
-	stats          TxStats
-	commitHandlers []func()
-
-	// WriteFlag specifies the flag for write-related methods like WriteTo().
-	// Tx opens the database file with the specified flag to copy the data.
-	//
-	// By default, the flag is unset, which works well for mostly in-memory
-	// workloads. For databases that are much larger than available RAM,
-	// set the flag to syscall.O_DIRECT to avoid thrashing the page cache.
-	WriteFlag int
-}
-
-// init initializes the transaction.
-func (tx *Tx) init(db *DB) {
-	tx.db = db
-	tx.pages = nil
-
-	// Copy the meta page since it can be changed by the writer.
-	tx.meta = &meta{}
-	db.meta().copy(tx.meta)
-
-	// Copy over the root bucket.
-	tx.root = newBucket(tx)
-	tx.root.bucket = &bucket{}
-	*tx.root.bucket = tx.meta.root
-
-	// Increment the transaction id and add a page cache for writable transactions.
-	if tx.writable {
-		tx.pages = make(map[pgid]*page)
-		tx.meta.txid += txid(1)
-	}
-}
-
-// ID returns the transaction id.
-func (tx *Tx) ID() int {
-	return int(tx.meta.txid)
-}
-
-// DB returns a reference to the database that created the transaction.
-func (tx *Tx) DB() *DB {
-	return tx.db
-}
-
-// Size returns the current database size in bytes as seen by this transaction.
-func (tx *Tx) Size() int64 {
-	return int64(tx.meta.pgid) * int64(tx.db.pageSize)
-}
-
-// Writable returns whether the transaction can perform write operations.
-func (tx *Tx) Writable() bool {
-	return tx.writable
-}
-
-// Cursor creates a cursor associated with the root bucket.
-// All items in the cursor will return a nil value because all root bucket keys point to buckets.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
-func (tx *Tx) Cursor() *Cursor {
-	return tx.root.Cursor()
-}
-
-// Stats retrieves a copy of the current transaction statistics.
-func (tx *Tx) Stats() TxStats {
-	return tx.stats
-}
-
-// Bucket retrieves a bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) Bucket(name []byte) *Bucket {
-	return tx.root.Bucket(name)
-}
-
-// CreateBucket creates a new bucket.
-// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
-	return tx.root.CreateBucket(name)
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
-	return tx.root.CreateBucketIfNotExists(name)
-}
-
-// DeleteBucket deletes a bucket.
-// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
-func (tx *Tx) DeleteBucket(name []byte) error {
-	return tx.root.DeleteBucket(name)
-}
-
-// ForEach executes a function for each bucket in the root.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller.
-func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
-	return tx.root.ForEach(func(k, v []byte) error {
-		if err := fn(k, tx.root.Bucket(k)); err != nil {
-			return err
-		}
-		return nil
-	})
-}
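
A sketch of enumerating the top-level bucket names with ForEach inside a View transaction:

func bucketNames(db *bolt.DB) ([]string, error) {
	var names []string
	err := db.View(func(tx *bolt.Tx) error {
		return tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
			names = append(names, string(name))
			return nil
		})
	})
	return names, err
}
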
-
-// OnCommit adds a handler function to be executed after the transaction successfully commits.
-func (tx *Tx) OnCommit(fn func()) {
-	tx.commitHandlers = append(tx.commitHandlers, fn)
-}
-
-// Commit writes all changes to disk and updates the meta page.
-// Returns an error if a disk write error occurs, or if Commit is
-// called on a read-only transaction.
-func (tx *Tx) Commit() error {
-	_assert(!tx.managed, "managed tx commit not allowed")
-	if tx.db == nil {
-		return ErrTxClosed
-	} else if !tx.writable {
-		return ErrTxNotWritable
-	}
-
-	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
-
-	// Rebalance nodes which have had deletions.
-	var startTime = time.Now()
-	tx.root.rebalance()
-	if tx.stats.Rebalance > 0 {
-		tx.stats.RebalanceTime += time.Since(startTime)
-	}
-
-	// spill data onto dirty pages.
-	startTime = time.Now()
-	if err := tx.root.spill(); err != nil {
-		tx.rollback()
-		return err
-	}
-	tx.stats.SpillTime += time.Since(startTime)
-
-	// Free the old root bucket.
-	tx.meta.root.root = tx.root.root
-
-	opgid := tx.meta.pgid
-
-	// Free the freelist and allocate new pages for it. This will overestimate
-	// the size of the freelist but not underestimate the size (which would be bad).
-	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
-	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
-	if err != nil {
-		tx.rollback()
-		return err
-	}
-	if err := tx.db.freelist.write(p); err != nil {
-		tx.rollback()
-		return err
-	}
-	tx.meta.freelist = p.id
-
-	// If the high water mark has moved up then attempt to grow the database.
-	if tx.meta.pgid > opgid {
-		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
-			tx.rollback()
-			return err
-		}
-	}
-
-	// Write dirty pages to disk.
-	startTime = time.Now()
-	if err := tx.write(); err != nil {
-		tx.rollback()
-		return err
-	}
-
-	// If strict mode is enabled then perform a consistency check.
-	// Only the first consistency error is reported in the panic.
-	if tx.db.StrictMode {
-		ch := tx.Check()
-		var errs []string
-		for {
-			err, ok := <-ch
-			if !ok {
-				break
-			}
-			errs = append(errs, err.Error())
-		}
-		if len(errs) > 0 {
-			panic("check fail: " + strings.Join(errs, "\n"))
-		}
-	}
-
-	// Write meta to disk.
-	if err := tx.writeMeta(); err != nil {
-		tx.rollback()
-		return err
-	}
-	tx.stats.WriteTime += time.Since(startTime)
-
-	// Finalize the transaction.
-	tx.close()
-
-	// Execute commit handlers now that the locks have been removed.
-	for _, fn := range tx.commitHandlers {
-		fn()
-	}
-
-	return nil
-}
-
-// Rollback closes the transaction and ignores all previous updates. Read-only
-// transactions must be rolled back and not committed.
-func (tx *Tx) Rollback() error {
-	_assert(!tx.managed, "managed tx rollback not allowed")
-	if tx.db == nil {
-		return ErrTxClosed
-	}
-	tx.rollback()
-	return nil
-}
-
-func (tx *Tx) rollback() {
-	if tx.db == nil {
-		return
-	}
-	if tx.writable {
-		tx.db.freelist.rollback(tx.meta.txid)
-		tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
-	}
-	tx.close()
-}
-
-func (tx *Tx) close() {
-	if tx.db == nil {
-		return
-	}
-	if tx.writable {
-		// Grab freelist stats.
-		var freelistFreeN = tx.db.freelist.free_count()
-		var freelistPendingN = tx.db.freelist.pending_count()
-		var freelistAlloc = tx.db.freelist.size()
-
-		// Remove transaction ref & writer lock.
-		tx.db.rwtx = nil
-		tx.db.rwlock.Unlock()
-
-		// Merge statistics.
-		tx.db.statlock.Lock()
-		tx.db.stats.FreePageN = freelistFreeN
-		tx.db.stats.PendingPageN = freelistPendingN
-		tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
-		tx.db.stats.FreelistInuse = freelistAlloc
-		tx.db.stats.TxStats.add(&tx.stats)
-		tx.db.statlock.Unlock()
-	} else {
-		tx.db.removeTx(tx)
-	}
-
-	// Clear all references.
-	tx.db = nil
-	tx.meta = nil
-	tx.root = Bucket{tx: tx}
-	tx.pages = nil
-}
-
-// Copy writes the entire database to a writer.
-// This function exists for backwards compatibility. Use WriteTo() instead.
-func (tx *Tx) Copy(w io.Writer) error {
-	_, err := tx.WriteTo(w)
-	return err
-}
-
-// WriteTo writes the entire database to a writer.
-// If err == nil then exactly tx.Size() bytes will be written into the writer.
-func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
-	// Attempt to open reader with WriteFlag
-	f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
-	if err != nil {
-		return 0, err
-	}
-	defer func() { _ = f.Close() }()
-
-	// Generate a meta page. We use the same page data for both meta pages.
-	buf := make([]byte, tx.db.pageSize)
-	page := (*page)(unsafe.Pointer(&buf[0]))
-	page.flags = metaPageFlag
-	*page.meta() = *tx.meta
-
-	// Write meta 0.
-	page.id = 0
-	page.meta().checksum = page.meta().sum64()
-	nn, err := w.Write(buf)
-	n += int64(nn)
-	if err != nil {
-		return n, fmt.Errorf("meta 0 copy: %s", err)
-	}
-
-	// Write meta 1 with a lower transaction id.
-	page.id = 1
-	page.meta().txid -= 1
-	page.meta().checksum = page.meta().sum64()
-	nn, err = w.Write(buf)
-	n += int64(nn)
-	if err != nil {
-		return n, fmt.Errorf("meta 1 copy: %s", err)
-	}
-
-	// Move past the meta pages in the file.
-	if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
-		return n, fmt.Errorf("seek: %s", err)
-	}
-
-	// Copy data pages.
-	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
-	n += wn
-	if err != nil {
-		return n, err
-	}
-
-	return n, f.Close()
-}
-
-// CopyFile copies the entire database to file at the given path.
-// A reader transaction is maintained during the copy so it is safe to continue
-// using the database while a copy is in progress.
-func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
-	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
-	if err != nil {
-		return err
-	}
-
-	err = tx.Copy(f)
-	if err != nil {
-		_ = f.Close()
-		return err
-	}
-	return f.Close()
-}
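
A sketch of a hot backup built from View and CopyFile: the read transaction keeps the copy consistent while other readers and the single writer carry on:

func backup(db *bolt.DB, path string) error {
	return db.View(func(tx *bolt.Tx) error {
		return tx.CopyFile(path, 0600)
	})
}
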
-
-// Check performs several consistency checks on the database for this transaction.
-// An error is returned if any inconsistency is found.
-//
-// It can be safely run concurrently on a writable transaction. However, this
-// incurs a high cost for large databases and databases with a lot of subbuckets
-// because of caching. This overhead can be avoided by running the check on a
-// read-only transaction; however, it is then not safe to execute other writer
-// transactions at the same time.
-func (tx *Tx) Check() <-chan error {
-	ch := make(chan error)
-	go tx.check(ch)
-	return ch
-}
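
A sketch of draining the checker from a read-only transaction (with no concurrent writer, per the caveat above):

func checkDB(db *bolt.DB) []error {
	var errs []error
	_ = db.View(func(tx *bolt.Tx) error {
		// Check closes the channel when the scan completes.
		for err := range tx.Check() {
			errs = append(errs, err)
		}
		return nil
	})
	return errs
}
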
-
-func (tx *Tx) check(ch chan error) {
-	// Check if any pages are double freed.
-	freed := make(map[pgid]bool)
-	for _, id := range tx.db.freelist.all() {
-		if freed[id] {
-			ch <- fmt.Errorf("page %d: already freed", id)
-		}
-		freed[id] = true
-	}
-
-	// Track every reachable page.
-	reachable := make(map[pgid]*page)
-	reachable[0] = tx.page(0) // meta0
-	reachable[1] = tx.page(1) // meta1
-	for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
-		reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
-	}
-
-	// Recursively check buckets.
-	tx.checkBucket(&tx.root, reachable, freed, ch)
-
-	// Ensure all pages below high water mark are either reachable or freed.
-	for i := pgid(0); i < tx.meta.pgid; i++ {
-		_, isReachable := reachable[i]
-		if !isReachable && !freed[i] {
-			ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
-		}
-	}
-
-	// Close the channel to signal completion.
-	close(ch)
-}
-
-func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
-	// Ignore inline buckets.
-	if b.root == 0 {
-		return
-	}
-
-	// Check every page used by this bucket.
-	b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
-		if p.id > tx.meta.pgid {
-			ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
-		}
-
-		// Ensure each page is only referenced once.
-		for i := pgid(0); i <= pgid(p.overflow); i++ {
-			var id = p.id + i
-			if _, ok := reachable[id]; ok {
-				ch <- fmt.Errorf("page %d: multiple references", int(id))
-			}
-			reachable[id] = p
-		}
-
-		// We should only encounter un-freed leaf and branch pages.
-		if freed[p.id] {
-			ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
-		} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
-			ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
-		}
-	})
-
-	// Check each bucket within this bucket.
-	_ = b.ForEach(func(k, v []byte) error {
-		if child := b.Bucket(k); child != nil {
-			tx.checkBucket(child, reachable, freed, ch)
-		}
-		return nil
-	})
-}
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (tx *Tx) allocate(count int) (*page, error) {
-	p, err := tx.db.allocate(count)
-	if err != nil {
-		return nil, err
-	}
-
-	// Save to our page cache.
-	tx.pages[p.id] = p
-
-	// Update statistics.
-	tx.stats.PageCount++
-	tx.stats.PageAlloc += count * tx.db.pageSize
-
-	return p, nil
-}
-
-// write writes any dirty pages to disk.
-func (tx *Tx) write() error {
-	// Sort pages by id.
-	pages := make(pages, 0, len(tx.pages))
-	for _, p := range tx.pages {
-		pages = append(pages, p)
-	}
-	// Clear out page cache early.
-	tx.pages = make(map[pgid]*page)
-	sort.Sort(pages)
-
-	// Write pages to disk in order.
-	for _, p := range pages {
-		size := (int(p.overflow) + 1) * tx.db.pageSize
-		offset := int64(p.id) * int64(tx.db.pageSize)
-
-		// Write out page in "max allocation" sized chunks.
-		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
-		for {
-			// Limit our write to our max allocation size.
-			sz := size
-			if sz > maxAllocSize-1 {
-				sz = maxAllocSize - 1
-			}
-
-			// Write chunk to disk.
-			buf := ptr[:sz]
-			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
-				return err
-			}
-
-			// Update statistics.
-			tx.stats.Write++
-
-			// Exit inner for loop if we've written all the chunks.
-			size -= sz
-			if size == 0 {
-				break
-			}
-
-			// Otherwise move offset forward and move pointer to next chunk.
-			offset += int64(sz)
-			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
-		}
-	}
-
-	// Ignore file sync if flag is set on DB.
-	if !tx.db.NoSync || IgnoreNoSync {
-		if err := fdatasync(tx.db); err != nil {
-			return err
-		}
-	}
-
-	// Put small pages back to page pool.
-	for _, p := range pages {
-		// Ignore page sizes over 1 page.
-		// These are allocated using make() instead of the page pool.
-		if int(p.overflow) != 0 {
-			continue
-		}
-
-		buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
-
-		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
-		for i := range buf {
-			buf[i] = 0
-		}
-		tx.db.pagePool.Put(buf)
-	}
-
-	return nil
-}
-
-// writeMeta writes the meta to the disk.
-func (tx *Tx) writeMeta() error {
-	// Create a temporary buffer for the meta page.
-	buf := make([]byte, tx.db.pageSize)
-	p := tx.db.pageInBuffer(buf, 0)
-	tx.meta.write(p)
-
-	// Write the meta page to file.
-	if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
-		return err
-	}
-	if !tx.db.NoSync || IgnoreNoSync {
-		if err := fdatasync(tx.db); err != nil {
-			return err
-		}
-	}
-
-	// Update statistics.
-	tx.stats.Write++
-
-	return nil
-}
-
-// page returns a reference to the page with a given id.
-// If page has been written to then a temporary buffered page is returned.
-func (tx *Tx) page(id pgid) *page {
-	// Check the dirty pages first.
-	if tx.pages != nil {
-		if p, ok := tx.pages[id]; ok {
-			return p
-		}
-	}
-
-	// Otherwise return directly from the mmap.
-	return tx.db.page(id)
-}
-
-// forEachPage iterates over every page within a given page and executes a function.
-func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
-	p := tx.page(pgid)
-
-	// Execute function.
-	fn(p, depth)
-
-	// Recursively loop over children.
-	if (p.flags & branchPageFlag) != 0 {
-		for i := 0; i < int(p.count); i++ {
-			elem := p.branchPageElement(uint16(i))
-			tx.forEachPage(elem.pgid, depth+1, fn)
-		}
-	}
-}
-
-// Page returns page information for a given page number.
-// This is only safe for concurrent use when used by a writable transaction.
-func (tx *Tx) Page(id int) (*PageInfo, error) {
-	if tx.db == nil {
-		return nil, ErrTxClosed
-	} else if pgid(id) >= tx.meta.pgid {
-		return nil, nil
-	}
-
-	// Build the page info.
-	p := tx.db.page(pgid(id))
-	info := &PageInfo{
-		ID:            id,
-		Count:         int(p.count),
-		OverflowCount: int(p.overflow),
-	}
-
-	// Determine the type (or if it's free).
-	if tx.db.freelist.freed(pgid(id)) {
-		info.Type = "free"
-	} else {
-		info.Type = p.typ()
-	}
-
-	return info, nil
-}
-
-// TxStats represents statistics about the actions performed by the transaction.
-type TxStats struct {
-	// Page statistics.
-	PageCount int // number of page allocations
-	PageAlloc int // total bytes allocated
-
-	// Cursor statistics.
-	CursorCount int // number of cursors created
-
-	// Node statistics
-	NodeCount int // number of node allocations
-	NodeDeref int // number of node dereferences
-
-	// Rebalance statistics.
-	Rebalance     int           // number of node rebalances
-	RebalanceTime time.Duration // total time spent rebalancing
-
-	// Split/Spill statistics.
-	Split     int           // number of nodes split
-	Spill     int           // number of nodes spilled
-	SpillTime time.Duration // total time spent spilling
-
-	// Write statistics.
-	Write     int           // number of writes performed
-	WriteTime time.Duration // total time spent writing to disk
-}
-
-func (s *TxStats) add(other *TxStats) {
-	s.PageCount += other.PageCount
-	s.PageAlloc += other.PageAlloc
-	s.CursorCount += other.CursorCount
-	s.NodeCount += other.NodeCount
-	s.NodeDeref += other.NodeDeref
-	s.Rebalance += other.Rebalance
-	s.RebalanceTime += other.RebalanceTime
-	s.Split += other.Split
-	s.Spill += other.Spill
-	s.SpillTime += other.SpillTime
-	s.Write += other.Write
-	s.WriteTime += other.WriteTime
-}
-
-// Sub calculates and returns the difference between two sets of transaction stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *TxStats) Sub(other *TxStats) TxStats {
-	var diff TxStats
-	diff.PageCount = s.PageCount - other.PageCount
-	diff.PageAlloc = s.PageAlloc - other.PageAlloc
-	diff.CursorCount = s.CursorCount - other.CursorCount
-	diff.NodeCount = s.NodeCount - other.NodeCount
-	diff.NodeDeref = s.NodeDeref - other.NodeDeref
-	diff.Rebalance = s.Rebalance - other.Rebalance
-	diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
-	diff.Split = s.Split - other.Split
-	diff.Spill = s.Spill - other.Spill
-	diff.SpillTime = s.SpillTime - other.SpillTime
-	diff.Write = s.Write - other.Write
-	diff.WriteTime = s.WriteTime - other.WriteTime
-	return diff
-}
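The TxStats helpers deleted here survive unchanged in bbolt, the maintained fork this commit switches to. A minimal sketch against go.etcd.io/bbolt (file name and workload are illustrative) showing how Sub isolates the counters for one span of work:

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("stats.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	prev := db.Stats().TxStats // snapshot before the workload
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("demo"))
		return err
	}); err != nil {
		log.Fatal(err)
	}

	delta := db.Stats().TxStats.Sub(&prev) // counters for this span only
	fmt.Printf("writes=%d write-time=%v\n", delta.Write, delta.WriteTime)
}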

+ 2 - 1
vendor/github.com/containerd/containerd/README.md

@@ -2,6 +2,7 @@
 
 [![GoDoc](https://godoc.org/github.com/containerd/containerd?status.svg)](https://godoc.org/github.com/containerd/containerd)
 [![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/containerd/containerd?branch=master&svg=true)](https://ci.appveyor.com/project/mlaventure/containerd-3g73f?branch=master)
 [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
 [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd)
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271)
@@ -223,7 +224,7 @@ This will be the best place to discuss design and implementation.
 
 For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
 
-**Slack:** https://dockr.ly/community
+**Slack:** https://join.slack.com/t/dockercommunity/shared_invite/enQtNDM4NjAwNDMyOTUwLWZlMDZmYWRjZjk4Zjc5ZGQ5NWZkOWI1Yjk2NGE3ZWVlYjYxM2VhYjczOWIyZDFhZTE3NTUwZWQzMjhmNGYyZTg
 
 ### Reporting security issues
 

+ 2 - 2
vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go

@@ -443,7 +443,7 @@ type ContentClient interface {
 	// Only one active stream may exist at a time for each ref.
 	//
 	// Once a write stream has started, it may only write to a single ref, thus
-	// once a stream is started, the ref may be ommitted on subsequent writes.
+	// once a stream is started, the ref may be omitted on subsequent writes.
 	//
 	// For any write transaction represented by a ref, only a single write may
 	// be made to a given offset. If overlapping writes occur, it is an error.
@@ -658,7 +658,7 @@ type ContentServer interface {
 	// Only one active stream may exist at a time for each ref.
 	//
 	// Once a write stream has started, it may only write to a single ref, thus
-	// once a stream is started, the ref may be ommitted on subsequent writes.
+	// once a stream is started, the ref may be omitted on subsequent writes.
 	//
 	// For any write transaction represented by a ref, only a single write may
 	// be made to a given offset. If overlapping writes occur, it is an error.

+ 1 - 1
vendor/github.com/containerd/containerd/api/services/content/v1/content.proto

@@ -55,7 +55,7 @@ service Content {
 	// Only one active stream may exist at a time for each ref.
 	//
 	// Once a write stream has started, it may only write to a single ref, thus
-	// once a stream is started, the ref may be ommitted on subsequent writes.
+	// once a stream is started, the ref may be omitted on subsequent writes.
 	//
 	// For any write transaction represented by a ref, only a single write may
 	// be made to a given offset. If overlapping writes occur, it is an error.

+ 2 - 2
vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go

@@ -141,7 +141,7 @@ type EventsClient interface {
 	// Forward sends an event that has already been packaged into an envelope
 	// with a timestamp and namespace.
 	//
-	// This is useful if earlier timestamping is required or when fowarding on
+	// This is useful if earlier timestamping is required or when forwarding on
 	// behalf of another component, namespace or publisher.
 	Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
 	// Subscribe to a stream of events, possibly returning only that match any
@@ -223,7 +223,7 @@ type EventsServer interface {
 	// Forward sends an event that has already been packaged into an envelope
 	// with a timestamp and namespace.
 	//
-	// This is useful if earlier timestamping is required or when fowarding on
+	// This is useful if earlier timestamping is required or when forwarding on
 	// behalf of another component, namespace or publisher.
 	Forward(context.Context, *ForwardRequest) (*google_protobuf2.Empty, error)
 	// Subscribe to a stream of events, possibly returning only that match any

+ 1 - 1
vendor/github.com/containerd/containerd/api/services/events/v1/events.proto

@@ -20,7 +20,7 @@ service Events {
 	// Forward sends an event that has already been packaged into an envelope
 	// with a timestamp and namespace.
 	//
-	// This is useful if earlier timestamping is required or when fowarding on
+	// This is useful if earlier timestamping is required or when forwarding on
 	// behalf of another component, namespace or publisher.
 	rpc Forward(ForwardRequest) returns (google.protobuf.Empty);
 

+ 86 - 2
vendor/github.com/containerd/containerd/archive/compression/compression.go

@@ -20,9 +20,15 @@ import (
 	"bufio"
 	"bytes"
 	"compress/gzip"
+	"context"
 	"fmt"
 	"io"
+	"os"
+	"os/exec"
+	"strconv"
 	"sync"
+
+	"github.com/containerd/containerd/log"
 )
 
 type (
@@ -37,6 +43,13 @@ const (
 	Gzip
 )
 
+const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ"
+
+var (
+	initPigz   sync.Once
+	unpigzPath string
+)
+
 var (
 	bufioReader32KPool = &sync.Pool{
 		New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
@@ -120,11 +133,18 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) {
 		readBufWrapper := &readCloserWrapper{buf, compression, closer}
 		return readBufWrapper, nil
 	case Gzip:
-		gzReader, err := gzip.NewReader(buf)
+		ctx, cancel := context.WithCancel(context.Background())
+		gzReader, err := gzipDecompress(ctx, buf)
 		if err != nil {
+			cancel()
 			return nil, err
 		}
-		readBufWrapper := &readCloserWrapper{gzReader, compression, closer}
+
+		readBufWrapper := &readCloserWrapper{gzReader, compression, func() error {
+			cancel()
+			return closer()
+		}}
+
 		return readBufWrapper, nil
 	default:
 		return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
@@ -151,3 +171,67 @@ func (compression *Compression) Extension() string {
 	}
 	return ""
 }
+
+func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
+	initPigz.Do(func() {
+		if unpigzPath = detectPigz(); unpigzPath != "" {
+			log.L.Debug("using pigz for decompression")
+		}
+	})
+
+	if unpigzPath == "" {
+		return gzip.NewReader(buf)
+	}
+
+	return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
+}
+
+func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
+	reader, writer := io.Pipe()
+
+	cmd.Stdin = in
+	cmd.Stdout = writer
+
+	var errBuf bytes.Buffer
+	cmd.Stderr = &errBuf
+
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	go func() {
+		if err := cmd.Wait(); err != nil {
+			writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+		} else {
+			writer.Close()
+		}
+	}()
+
+	return reader, nil
+}
+
+func detectPigz() string {
+	path, err := exec.LookPath("unpigz")
+	if err != nil {
+		log.L.WithError(err).Debug("unpigz not found, falling back to go gzip")
+		return ""
+	}
+
+	// Check whether pigz is disabled via the CONTAINERD_DISABLE_PIGZ env variable
+	value := os.Getenv(disablePigzEnv)
+	if value == "" {
+		return path
+	}
+
+	disable, err := strconv.ParseBool(value)
+	if err != nil {
+		log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value)
+		return path
+	}
+
+	if disable {
+		return ""
+	}
+
+	return path
+}
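Note that the unpigz lookup above runs once per process (initPigz is a sync.Once), so CONTAINERD_DISABLE_PIGZ must be set before the first decompression. A short usage sketch (the input file name is hypothetical):

package main

import (
	"io"
	"log"
	"os"

	"github.com/containerd/containerd/archive/compression"
)

func main() {
	// Must happen before the first DecompressStream call; the unpigz
	// lookup result is cached for the life of the process.
	os.Setenv("CONTAINERD_DISABLE_PIGZ", "1")

	f, err := os.Open("layer.tar.gz") // hypothetical gzipped layer
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	rc, err := compression.DecompressStream(f)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}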

+ 9 - 0
vendor/github.com/containerd/containerd/cio/io.go

@@ -141,6 +141,15 @@ func NewCreator(opts ...Opt) Creator {
 		if err != nil {
 			return nil, err
 		}
+		if streams.Stdin == nil {
+			fifos.Stdin = ""
+		}
+		if streams.Stdout == nil {
+			fifos.Stdout = ""
+		}
+		if streams.Stderr == nil {
+			fifos.Stderr = ""
+		}
 		return copyIO(fifos, streams)
 	}
 }
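With this change a nil stream no longer produces a fifo, so a task attached only to stdout/stderr does not leave a dangling stdin pipe. A hedged sketch of the calling side (the surrounding container setup is assumed):

package example

import (
	"context"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
)

// newTaskNoStdin creates a task that captures only stdout and stderr;
// the nil stdin means no stdin fifo is created at all.
func newTaskNoStdin(ctx context.Context, c containerd.Container) (containerd.Task, error) {
	return c.NewTask(ctx, cio.NewCreator(cio.WithStreams(nil, os.Stdout, os.Stderr)))
}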

+ 0 - 58
vendor/github.com/containerd/containerd/container_opts_unix.go

@@ -20,25 +20,21 @@ package containerd
 
 import (
 	"context"
-	"encoding/json"
 	"fmt"
 	"os"
 	"path/filepath"
 	"syscall"
 
-	"github.com/containerd/containerd/api/types"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/platforms"
-	"github.com/containerd/containerd/runtime/linux/runctypes"
 	"github.com/gogo/protobuf/proto"
 	protobuf "github.com/gogo/protobuf/types"
 	"github.com/opencontainers/image-spec/identity"
 	"github.com/opencontainers/image-spec/specs-go/v1"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
 
@@ -105,44 +101,6 @@ func WithCheckpoint(im Image, snapshotKey string) NewContainerOpts {
 	}
 }
 
-// WithTaskCheckpoint allows a task to be created with live runtime and memory data from a
-// previous checkpoint. Additional software such as CRIU may be required to
-// restore a task from a checkpoint
-func WithTaskCheckpoint(im Image) NewTaskOpts {
-	return func(ctx context.Context, c *Client, info *TaskInfo) error {
-		desc := im.Target()
-		id := desc.Digest
-		index, err := decodeIndex(ctx, c.ContentStore(), desc)
-		if err != nil {
-			return err
-		}
-		for _, m := range index.Manifests {
-			if m.MediaType == images.MediaTypeContainerd1Checkpoint {
-				info.Checkpoint = &types.Descriptor{
-					MediaType: m.MediaType,
-					Size_:     m.Size,
-					Digest:    m.Digest,
-				}
-				return nil
-			}
-		}
-		return fmt.Errorf("checkpoint not found in index %s", id)
-	}
-}
-
-func decodeIndex(ctx context.Context, store content.Provider, desc ocispec.Descriptor) (*v1.Index, error) {
-	var index v1.Index
-	p, err := content.ReadBlob(ctx, store, desc)
-	if err != nil {
-		return nil, err
-	}
-	if err := json.Unmarshal(p, &index); err != nil {
-		return nil, err
-	}
-
-	return &index, nil
-}
-
 // WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
 // filesystem to be used by a container with user namespaces
 func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {
@@ -221,19 +179,3 @@ func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
 		return os.Lchown(path, u, g)
 	}
 }
-
-// WithNoPivotRoot instructs the runtime not to use pivot_root
-func WithNoPivotRoot(_ context.Context, _ *Client, info *TaskInfo) error {
-	if info.Options == nil {
-		info.Options = &runctypes.CreateOptions{
-			NoPivotRoot: true,
-		}
-		return nil
-	}
-	copts, ok := info.Options.(*runctypes.CreateOptions)
-	if !ok {
-		return errors.New("invalid options type, expected runctypes.CreateOptions")
-	}
-	copts.NoPivotRoot = true
-	return nil
-}

+ 3 - 3
vendor/github.com/containerd/containerd/content/helpers.go

@@ -70,7 +70,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc o
 	cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
 	if err != nil {
 		if !errdefs.IsAlreadyExists(err) {
-			return err
+			return errors.Wrap(err, "failed to open writer")
 		}
 
 		return nil // already present
@@ -127,7 +127,7 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er
 func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
 	ws, err := cw.Status()
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to get status")
 	}
 
 	if ws.Offset > 0 {
@@ -138,7 +138,7 @@ func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected dige
 	}
 
 	if _, err := copyWithBuffer(cw, r); err != nil {
-		return err
+		return errors.Wrap(err, "failed to copy")
 	}
 
 	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
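The added errors.Wrap calls annotate where a failure happened without losing the typed cause, so the errdefs classification helpers still match through the wrapper:

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func main() {
	err := errors.Wrap(errdefs.ErrAlreadyExists, "failed to open writer")
	fmt.Println(errdefs.IsAlreadyExists(err)) // true: the cause survives wrapping
	fmt.Println(err)                          // failed to open writer: already exists
}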

+ 3 - 1
vendor/github.com/containerd/containerd/content/local/store.go

@@ -33,6 +33,8 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/filters"
 	"github.com/containerd/containerd/log"
+
+	"github.com/containerd/continuity"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -651,5 +653,5 @@ func writeTimestampFile(p string, t time.Time) error {
 		return err
 	}
 
-	return ioutil.WriteFile(p, b, 0666)
+	return continuity.AtomicWriteFile(p, b, 0666)
 }
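Switching to continuity.AtomicWriteFile means a crash mid-write can no longer leave a truncated timestamp file behind. Roughly, such helpers write to a temp file in the target directory and rename it into place; a hand-rolled sketch of the idea (not the continuity implementation itself):

package example

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

func atomicWrite(path string, data []byte, perm os.FileMode) error {
	tmp, err := ioutil.TempFile(filepath.Dir(path), ".tmp-")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op once the rename has happened
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil { // flush before making it visible
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmp.Name(), perm); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path) // atomic on POSIX filesystems
}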

+ 4 - 4
vendor/github.com/containerd/containerd/content/local/writer.go

@@ -132,11 +132,11 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
 	// clean up!!
 	defer os.RemoveAll(w.path)
 
+	if _, err := os.Stat(target); err == nil {
+		// collision with the target file!
+		return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
+	}
 	if err := os.Rename(ingest, target); err != nil {
-		if os.IsExist(err) {
-			// collision with the target file!
-			return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
-		}
 		return err
 	}
 	commitTime := time.Now()

+ 3 - 3
vendor/github.com/containerd/containerd/content/proxy/content_writer.go

@@ -57,7 +57,7 @@ func (rw *remoteWriter) Status() (content.Status, error) {
 		Action: contentapi.WriteActionStat,
 	})
 	if err != nil {
-		return content.Status{}, errors.Wrap(err, "error getting writer status")
+		return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status")
 	}
 
 	return content.Status{
@@ -82,7 +82,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
 		Data:   p,
 	})
 	if err != nil {
-		return 0, err
+		return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write")
 	}
 
 	n = int(resp.Offset - offset)
@@ -112,7 +112,7 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.
 		Labels:   base.Labels,
 	})
 	if err != nil {
-		return errdefs.FromGRPC(err)
+		return errors.Wrap(errdefs.FromGRPC(err), "commit failed")
 	}
 
 	if size != 0 && resp.Offset != size {

+ 1 - 1
vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go

@@ -30,7 +30,7 @@ import (
 )
 
 // WithProfile receives the name of a file stored on disk comprising a json
-// formated seccomp profile, as specified by the opencontainers/runtime-spec.
+// formatted seccomp profile, as specified by the opencontainers/runtime-spec.
 // The profile is read from the file, unmarshaled, and set to the spec.
 func WithProfile(profile string) oci.SpecOpts {
 	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {

+ 1 - 1
vendor/github.com/containerd/containerd/events/exchange/exchange.go

@@ -52,7 +52,7 @@ var _ events.Subscriber = &Exchange{}
 
 // Forward accepts an envelope to be directly distributed on the exchange.
 //
-// This is useful when an event is forwaded on behalf of another namespace or
+// This is useful when an event is forwarded on behalf of another namespace or
 // when the event is propagated on behalf of another publisher.
 func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) {
 	if err := validateEnvelope(envelope); err != nil {

+ 58 - 0
vendor/github.com/containerd/containerd/export.go

@@ -0,0 +1,58 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package containerd
+
+import (
+	"context"
+	"io"
+
+	"github.com/containerd/containerd/images"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type exportOpts struct {
+}
+
+// ExportOpt allows the caller to specify export-specific options
+type ExportOpt func(c *exportOpts) error
+
+func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
+	var eopts exportOpts
+	for _, o := range opts {
+		if err := o(&eopts); err != nil {
+			return eopts, err
+		}
+	}
+	return eopts, nil
+}
+
+// Export exports an image to a Tar stream.
+// OCI format is used by default.
+// It is up to the caller to put the "org.opencontainers.image.ref.name" annotation on desc.
+// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
+func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
+	_, err := resolveExportOpt(opts...) // unused now
+	if err != nil {
+		return nil, err
+	}
+	pr, pw := io.Pipe()
+	go func() {
+		pw.CloseWithError(errors.Wrap(exporter.Export(ctx, c.ContentStore(), desc, pw), "export failed"))
+	}()
+	return pr, nil
+}
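A hedged usage sketch for the relocated Export method; the V1Exporter from images/oci is assumed here as the Exporter implementation (any images.Exporter would do):

package example

import (
	"context"
	"io"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images/oci"
)

func exportImage(ctx context.Context, client *containerd.Client, img containerd.Image, w io.Writer) error {
	rc, err := client.Export(ctx, &oci.V1Exporter{}, img.Target())
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(w, rc) // export errors surface on the pipe reader
	return err
}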

+ 254 - 0
vendor/github.com/containerd/containerd/images/archive/importer.go

@@ -0,0 +1,254 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package archive provides a Docker and OCI compatible importer
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"path"
+
+	"github.com/containerd/containerd/archive/compression"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// ImportIndex imports an index from a tar archive image bundle
+// - implements Docker v1.1, v1.2 and OCI v1.
+// - prefers OCI v1 when provided
+// - creates OCI index for Docker formats
+// - normalizes Docker references and adds as OCI ref name
+//      e.g. alpine:latest -> docker.io/library/alpine:latest
+// - existing OCI reference names are untouched
+// - TODO: support option to compress layers on ingest
+func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) {
+	var (
+		tr = tar.NewReader(reader)
+
+		ociLayout ocispec.ImageLayout
+		mfsts     []struct {
+			Config   string
+			RepoTags []string
+			Layers   []string
+		}
+		symlinks = make(map[string]string)
+		blobs    = make(map[string]ocispec.Descriptor)
+	)
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return ocispec.Descriptor{}, err
+		}
+		if hdr.Typeflag == tar.TypeSymlink {
+			symlinks[hdr.Name] = path.Join(path.Dir(hdr.Name), hdr.Linkname)
+		}
+
+		if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
+			if hdr.Typeflag != tar.TypeDir {
+				log.G(ctx).WithField("file", hdr.Name).Debug("file type ignored")
+			}
+			continue
+		}
+
+		hdrName := path.Clean(hdr.Name)
+		if hdrName == ocispec.ImageLayoutFile {
+			if err = onUntarJSON(tr, &ociLayout); err != nil {
+				return ocispec.Descriptor{}, errors.Wrapf(err, "untar oci layout %q", hdr.Name)
+			}
+		} else if hdrName == "manifest.json" {
+			if err = onUntarJSON(tr, &mfsts); err != nil {
+				return ocispec.Descriptor{}, errors.Wrapf(err, "untar manifest %q", hdr.Name)
+			}
+		} else {
+			dgst, err := onUntarBlob(ctx, tr, store, hdr.Size, "tar-"+hdrName)
+			if err != nil {
+				return ocispec.Descriptor{}, errors.Wrapf(err, "failed to ingest %q", hdr.Name)
+			}
+
+			blobs[hdrName] = ocispec.Descriptor{
+				Digest: dgst,
+				Size:   hdr.Size,
+			}
+		}
+	}
+
+	// If OCI layout was given, interpret the tar as an OCI layout.
+	// When not provided, the layout of the tar will be interpreted
+	// as Docker v1.1 or v1.2.
+	if ociLayout.Version != "" {
+		if ociLayout.Version != ocispec.ImageLayoutVersion {
+			return ocispec.Descriptor{}, errors.Errorf("unsupported OCI version %s", ociLayout.Version)
+		}
+
+		idx, ok := blobs["index.json"]
+		if !ok {
+			return ocispec.Descriptor{}, errors.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion)
+		}
+
+		idx.MediaType = ocispec.MediaTypeImageIndex
+		return idx, nil
+	}
+
+	for name, linkname := range symlinks {
+		desc, ok := blobs[linkname]
+		if !ok {
+			return ocispec.Descriptor{}, errors.Errorf("no target for symlink layer from %q to %q", name, linkname)
+		}
+		blobs[name] = desc
+	}
+
+	var idx ocispec.Index
+	for _, mfst := range mfsts {
+		config, ok := blobs[mfst.Config]
+		if !ok {
+			return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config)
+		}
+		config.MediaType = ocispec.MediaTypeImageConfig
+
+		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs)
+		if err != nil {
+			return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers")
+		}
+
+		manifest := ocispec.Manifest{
+			Versioned: specs.Versioned{
+				SchemaVersion: 2,
+			},
+			Config: config,
+			Layers: layers,
+		}
+
+		desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest)
+		if err != nil {
+			return ocispec.Descriptor{}, errors.Wrap(err, "write docker manifest")
+		}
+
+		platforms, err := images.Platforms(ctx, store, desc)
+		if err != nil {
+			return ocispec.Descriptor{}, errors.Wrap(err, "unable to resolve platform")
+		}
+		if len(platforms) > 0 {
+			// Only one platform can be resolved from a non-index manifest.
+			// The platform can only come from the config included above;
+			// if the config has no platform it can be safely omitted.
+			desc.Platform = &platforms[0]
+		}
+
+		if len(mfst.RepoTags) == 0 {
+			idx.Manifests = append(idx.Manifests, desc)
+		} else {
+			// Add descriptor per tag
+			for _, ref := range mfst.RepoTags {
+				mfstdesc := desc
+
+				normalized, err := normalizeReference(ref)
+				if err != nil {
+					return ocispec.Descriptor{}, err
+				}
+
+				mfstdesc.Annotations = map[string]string{
+					ocispec.AnnotationRefName: normalized,
+				}
+
+				idx.Manifests = append(idx.Manifests, mfstdesc)
+			}
+		}
+	}
+
+	return writeManifest(ctx, store, idx, ocispec.MediaTypeImageIndex)
+}
+
+func onUntarJSON(r io.Reader, j interface{}) error {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(b, j); err != nil {
+		return err
+	}
+	return nil
+}
+
+func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size int64, ref string) (digest.Digest, error) {
+	dgstr := digest.Canonical.Digester()
+
+	if err := content.WriteBlob(ctx, store, ref, io.TeeReader(r, dgstr.Hash()), ocispec.Descriptor{Size: size}); err != nil {
+		return "", err
+	}
+
+	return dgstr.Digest(), nil
+}
+
+func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+	var layers []ocispec.Descriptor
+	for _, f := range layerFiles {
+		desc, ok := blobs[f]
+		if !ok {
+			return nil, errors.Errorf("layer %q not found", f)
+		}
+
+		// Open blob, resolve media type
+		ra, err := store.ReaderAt(ctx, desc)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to open %q (%s)", f, desc.Digest)
+		}
+		s, err := compression.DecompressStream(content.NewReader(ra))
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to detect compression for %q", f)
+		}
+		if s.GetCompression() == compression.Uncompressed {
+			// TODO: Support compressing and writing back to content store
+			desc.MediaType = ocispec.MediaTypeImageLayer
+		} else {
+			desc.MediaType = ocispec.MediaTypeImageLayerGzip
+		}
+		s.Close()
+
+		layers = append(layers, desc)
+	}
+	return layers, nil
+}
+
+func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) {
+	manifestBytes, err := json.Marshal(manifest)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	desc := ocispec.Descriptor{
+		MediaType: mediaType,
+		Digest:    digest.FromBytes(manifestBytes),
+		Size:      int64(len(manifestBytes)),
+	}
+	if err := content.WriteBlob(ctx, cs, "manifest-"+desc.Digest.String(), bytes.NewReader(manifestBytes), desc); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	return desc, nil
+}
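A usage sketch for ImportIndex; in practice the call runs under a lease, as Client.Import below does, so the freshly ingested blobs are not garbage collected mid-import:

package example

import (
	"context"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images/archive"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func importTar(ctx context.Context, client *containerd.Client, path string) (ocispec.Descriptor, error) {
	f, err := os.Open(path) // docker save output or an OCI layout tar
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	defer f.Close()
	return archive.ImportIndex(ctx, client.ContentStore(), f)
}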

+ 86 - 0
vendor/github.com/containerd/containerd/images/archive/reference.go

@@ -0,0 +1,86 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package archive
+
+import (
+	"strings"
+
+	"github.com/containerd/cri/pkg/util"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// FilterRefPrefix restricts references to having the given image
+// prefix. Tag-only references will have the prefix prepended.
+func FilterRefPrefix(image string) func(string) string {
+	return refTranslator(image, true)
+}
+
+// AddRefPrefix prepends the given image prefix to tag-only references,
+// while leaving full references unmodified.
+func AddRefPrefix(image string) func(string) string {
+	return refTranslator(image, false)
+}
+
+// refTranslator qualifies a tag-only reference with the image name, or
+// verifies (and optionally filters) a full reference.
+func refTranslator(image string, checkPrefix bool) func(string) string {
+	return func(ref string) string {
+		// Check if ref is full reference
+		if strings.ContainsAny(ref, "/:@") {
+			// If not prefixed, don't include image
+			if checkPrefix && !isImagePrefix(ref, image) {
+				return ""
+			}
+			return ref
+		}
+		return image + ":" + ref
+	}
+}
+
+func isImagePrefix(s, prefix string) bool {
+	if !strings.HasPrefix(s, prefix) {
+		return false
+	}
+	if len(s) > len(prefix) {
+		switch s[len(prefix)] {
+		case '/', ':', '@':
+			// Prevent matching partial namespaces
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+func normalizeReference(ref string) (string, error) {
+	// TODO: Replace this function to not depend on reference package
+	normalized, err := util.NormalizeImageRef(ref)
+	if err != nil {
+		return "", errors.Wrapf(err, "normalize image ref %q", ref)
+	}
+
+	return normalized.String(), nil
+}
+
+// DigestTranslator creates a digest reference by adding the
+// digest to an image name
+func DigestTranslator(prefix string) func(digest.Digest) string {
+	return func(dgst digest.Digest) string {
+		return prefix + "@" + dgst.String()
+	}
+}
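The two translators differ only in how they treat fully qualified references; given the logic above, their behavior works out as follows:

package main

import (
	"fmt"

	"github.com/containerd/containerd/images/archive"
)

func main() {
	f := archive.FilterRefPrefix("docker.io/library/alpine")
	fmt.Println(f("latest"))                       // docker.io/library/alpine:latest
	fmt.Println(f("docker.io/library/alpine:3.8")) // unchanged: prefix matches
	fmt.Println(f("example.com/other:1.0"))        // "" (filtered out)

	a := archive.AddRefPrefix("docker.io/library/alpine")
	fmt.Println(a("example.com/other:1.0")) // unchanged: full refs pass through
}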

+ 1 - 1
vendor/github.com/containerd/containerd/images/importexport.go

@@ -27,7 +27,7 @@ import (
 // Importer is the interface for image importer.
 type Importer interface {
 	// Import imports an image from a tar stream.
-	Import(ctx context.Context, store content.Store, reader io.Reader) ([]Image, error)
+	Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error)
 }
 
 // Exporter is the interface for image exporter.

+ 104 - 56
vendor/github.com/containerd/containerd/import.go

@@ -18,36 +18,61 @@ package containerd
 
 import (
 	"context"
+	"encoding/json"
 	"io"
 
+	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/images/archive"
+	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 type importOpts struct {
+	indexName string
+	imageRefT func(string) string
+	dgstRefT  func(digest.Digest) string
 }
 
 // ImportOpt allows the caller to specify import-specific options
-type ImportOpt func(c *importOpts) error
+type ImportOpt func(*importOpts) error
+
+// WithImageRefTranslator is used to translate the index reference
+// to an image reference for the image store.
+func WithImageRefTranslator(f func(string) string) ImportOpt {
+	return func(c *importOpts) error {
+		c.imageRefT = f
+		return nil
+	}
+}
 
-func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
-	var iopts importOpts
-	for _, o := range opts {
-		if err := o(&iopts); err != nil {
-			return iopts, err
-		}
+// WithDigestRef is used to create digest images for each
+// manifest in the index.
+func WithDigestRef(f func(digest.Digest) string) ImportOpt {
+	return func(c *importOpts) error {
+		c.dgstRefT = f
+		return nil
+	}
+}
+
+// WithIndexName creates a tag pointing to the imported index
+func WithIndexName(name string) ImportOpt {
+	return func(c *importOpts) error {
+		c.indexName = name
+		return nil
 	}
-	return iopts, nil
 }
 
 // Import imports an image from a Tar stream using reader.
 // Caller needs to specify importer. Future version may use oci.v1 as the default.
 // Note that unreferenced blobs may be imported to the content store as well.
-func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
-	_, err := resolveImportOpt(opts...) // unused now
-	if err != nil {
-		return nil, err
+func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt) ([]images.Image, error) {
+	var iopts importOpts
+	for _, o := range opts {
+		if err := o(&iopts); err != nil {
+			return nil, err
+		}
 	}
 
 	ctx, done, err := c.WithLease(ctx)
@@ -56,63 +81,86 @@ func (c *Client) Import(ctx context.Context, importer images.Importer, reader io
 	}
 	defer done(ctx)
 
-	imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
+	index, err := archive.ImportIndex(ctx, c.ContentStore(), reader)
 	if err != nil {
-		// is.Update() is not called on error
 		return nil, err
 	}
 
-	is := c.ImageService()
-	var images []Image
-	for _, imgrec := range imgrecs {
-		if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
-			if !errdefs.IsNotFound(err) {
-				return nil, err
-			}
+	var (
+		imgs []images.Image
+		cs   = c.ContentStore()
+		is   = c.ImageService()
+	)
+
+	if iopts.indexName != "" {
+		imgs = append(imgs, images.Image{
+			Name:   iopts.indexName,
+			Target: index,
+		})
+	}
 
-			created, err := is.Create(ctx, imgrec)
-			if err != nil {
-				return nil, err
-			}
+	var handler images.HandlerFunc
+	handler = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		// Only save images at top level
+		if desc.Digest != index.Digest {
+			return images.Children(ctx, cs, desc)
+		}
+
+		p, err := content.ReadBlob(ctx, cs, desc)
+		if err != nil {
+			return nil, err
+		}
 
-			imgrec = created
-		} else {
-			imgrec = updated
+		var idx ocispec.Index
+		if err := json.Unmarshal(p, &idx); err != nil {
+			return nil, err
 		}
 
-		images = append(images, NewImage(c, imgrec))
+		for _, m := range idx.Manifests {
+			if ref := m.Annotations[ocispec.AnnotationRefName]; ref != "" {
+				if iopts.imageRefT != nil {
+					ref = iopts.imageRefT(ref)
+				}
+				if ref != "" {
+					imgs = append(imgs, images.Image{
+						Name:   ref,
+						Target: m,
+					})
+				}
+			}
+			if iopts.dgstRefT != nil {
+				ref := iopts.dgstRefT(m.Digest)
+				if ref != "" {
+					imgs = append(imgs, images.Image{
+						Name:   ref,
+						Target: m,
+					})
+				}
+			}
+		}
+
+		return idx.Manifests, nil
 	}
-	return images, nil
-}
 
-type exportOpts struct {
-}
+	handler = images.SetChildrenLabels(cs, handler)
+	if err := images.Walk(ctx, handler, index); err != nil {
+		return nil, err
+	}
 
-// ExportOpt allows the caller to specify export-specific options
-type ExportOpt func(c *exportOpts) error
+	for i := range imgs {
+		img, err := is.Update(ctx, imgs[i], "target")
+		if err != nil {
+			if !errdefs.IsNotFound(err) {
+				return nil, err
+			}
 
-func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
-	var eopts exportOpts
-	for _, o := range opts {
-		if err := o(&eopts); err != nil {
-			return eopts, err
+			img, err = is.Create(ctx, imgs[i])
+			if err != nil {
+				return nil, err
+			}
 		}
+		imgs[i] = img
 	}
-	return eopts, nil
-}
 
-// Export exports an image to a Tar stream.
-// OCI format is used by default.
-// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
-// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
-func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
-	_, err := resolveExportOpt(opts...) // unused now
-	if err != nil {
-		return nil, err
-	}
-	pr, pw := io.Pipe()
-	go func() {
-		pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
-	}()
-	return pr, nil
+	return imgs, nil
 }
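A sketch of the new option-driven Import call; the base reference is hypothetical, and the translators come from images/archive above:

package example

import (
	"context"
	"io"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/images/archive"
)

func importWithRefs(ctx context.Context, client *containerd.Client, r io.Reader) ([]images.Image, error) {
	const base = "docker.io/library/alpine" // hypothetical image prefix
	return client.Import(ctx, r,
		containerd.WithIndexName(base+":imported-index"),
		containerd.WithImageRefTranslator(archive.AddRefPrefix(base)),
		containerd.WithDigestRef(archive.DigestTranslator(base)),
	)
}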

+ 26 - 15
vendor/github.com/containerd/containerd/install.go

@@ -33,25 +33,14 @@ import (
 
 // Install a binary image into the opt service
 func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) error {
-	resp, err := c.IntrospectionService().Plugins(ctx, &introspectionapi.PluginsRequest{
-		Filters: []string{
-			"id==opt",
-		},
-	})
-	if err != nil {
-		return err
-	}
-	if len(resp.Plugins) != 1 {
-		return errors.New("opt service not enabled")
-	}
-	path := resp.Plugins[0].Exports["path"]
-	if path == "" {
-		return errors.New("opt path not exported")
-	}
 	var config InstallConfig
 	for _, o := range opts {
 		o(&config)
 	}
+	path, err := c.getInstallPath(ctx, config)
+	if err != nil {
+		return err
+	}
 	var (
 		cs       = image.ContentStore()
 		platform = platforms.Default()
@@ -89,3 +78,25 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts)
 	}
 	return nil
 }
+
+func (c *Client) getInstallPath(ctx context.Context, config InstallConfig) (string, error) {
+	if config.Path != "" {
+		return config.Path, nil
+	}
+	resp, err := c.IntrospectionService().Plugins(ctx, &introspectionapi.PluginsRequest{
+		Filters: []string{
+			"id==opt",
+		},
+	})
+	if err != nil {
+		return "", err
+	}
+	if len(resp.Plugins) != 1 {
+		return "", errors.New("opt service not enabled")
+	}
+	path := resp.Plugins[0].Exports["path"]
+	if path == "" {
+		return "", errors.New("opt path not exported")
+	}
+	return path, nil
+}

+ 9 - 0
vendor/github.com/containerd/containerd/install_opts.go

@@ -25,6 +25,8 @@ type InstallConfig struct {
 	Libs bool
 	// Replace will overwrite existing binaries or libs in the opt directory
 	Replace bool
+	// Path to install libs and binaries to
+	Path string
 }
 
 // WithInstallLibs installs libs from the image
@@ -36,3 +38,10 @@ func WithInstallLibs(c *InstallConfig) {
 func WithInstallReplace(c *InstallConfig) {
 	c.Replace = true
 }
+
+// WithInstallPath sets the optional install path
+func WithInstallPath(path string) InstallOpts {
+	return func(c *InstallConfig) {
+		c.Path = path
+	}
+}
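Together with the getInstallPath refactor above, an explicit path now bypasses the opt-plugin lookup entirely. A hedged usage sketch (the destination path is illustrative):

package example

import (
	"context"

	"github.com/containerd/containerd"
)

func installTo(ctx context.Context, client *containerd.Client, img containerd.Image) error {
	return client.Install(ctx, img,
		containerd.WithInstallLibs,
		containerd.WithInstallReplace,
		containerd.WithInstallPath("/opt/custom"), // skips the opt service lookup
	)
}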

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/bolt.go

@@ -19,8 +19,8 @@ package metadata
 import (
 	"context"
 
-	"github.com/boltdb/bolt"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 type transactionKey struct{}

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go

@@ -19,8 +19,8 @@ package boltutil
 import (
 	"time"
 
-	"github.com/boltdb/bolt"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 var (

+ 4 - 4
vendor/github.com/containerd/containerd/metadata/buckets.go

@@ -17,11 +17,11 @@
 package metadata
 
 import (
-	"github.com/boltdb/bolt"
 	digest "github.com/opencontainers/go-digest"
+	bolt "go.etcd.io/bbolt"
 )
 
-// The layout where a "/" delineates a bucket is desribed in the following
+// The layout where a "/" delineates a bucket is described in the following
 // section. Please try to follow this as closely as possible when adding
 // functionality. We can bolster this with helpers and more structure if that
 // becomes an issue.
@@ -164,11 +164,11 @@ func getSnapshotterBucket(tx *bolt.Tx, namespace, snapshotter string) *bolt.Buck
 }
 
 func createBlobBucket(tx *bolt.Tx, namespace string, dgst digest.Digest) (*bolt.Bucket, error) {
-	bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(dgst.String()))
+	bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob)
 	if err != nil {
 		return nil, err
 	}
-	return bkt, nil
+	return bkt.CreateBucket([]byte(dgst.String()))
 }
 
 func getBlobsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/containers.go

@@ -21,7 +21,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/boltdb/bolt"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/filters"
@@ -32,6 +31,7 @@ import (
 	"github.com/gogo/protobuf/proto"
 	"github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 type containerStore struct {

+ 6 - 11
vendor/github.com/containerd/containerd/metadata/content.go

@@ -23,7 +23,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/boltdb/bolt"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/filters"
@@ -34,6 +33,7 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 type contentStore struct {
@@ -592,9 +592,6 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64,
 		}
 		size = nw.desc.Size
 		actual = nw.desc.Digest
-		if getBlobBucket(tx, nw.namespace, actual) != nil {
-			return "", errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual)
-		}
 	} else {
 		status, err := nw.w.Status()
 		if err != nil {
@@ -606,18 +603,16 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64,
 		size = status.Offset
 		actual = nw.w.Digest()
 
-		if err := nw.w.Commit(ctx, size, expected); err != nil {
-			if !errdefs.IsAlreadyExists(err) {
-				return "", err
-			}
-			if getBlobBucket(tx, nw.namespace, actual) != nil {
-				return "", errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual)
-			}
+		if err := nw.w.Commit(ctx, size, expected); err != nil && !errdefs.IsAlreadyExists(err) {
+			return "", err
 		}
 	}
 
 	bkt, err := createBlobBucket(tx, nw.namespace, actual)
 	if err != nil {
+		if err == bolt.ErrBucketExists {
+			return "", errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual)
+		}
 		return "", err
 	}
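The duplicate check now rides on bucket creation: createBlobBucket (see the buckets.go hunk above) uses CreateBucket, which fails with bolt.ErrBucketExists when the digest is already present in the namespace, and that is mapped back to errdefs.ErrAlreadyExists. Callers can keep treating a duplicate commit as success, e.g.:

package example

import (
	"bytes"
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func ingest(ctx context.Context, cs content.Store, ref string, blob []byte) error {
	desc := ocispec.Descriptor{
		Digest: digest.FromBytes(blob),
		Size:   int64(len(blob)),
	}
	err := content.WriteBlob(ctx, cs, ref, bytes.NewReader(blob), desc)
	if err != nil && !errdefs.IsAlreadyExists(err) {
		return err
	}
	return nil // a repeat commit of the same digest is not an error here
}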
 

+ 2 - 2
vendor/github.com/containerd/containerd/metadata/db.go

@@ -23,12 +23,12 @@ import (
 	"sync"
 	"time"
 
-	"github.com/boltdb/bolt"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/gc"
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/snapshots"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 const (
@@ -43,7 +43,7 @@ const (
 	// dbVersion represents updates to the schema
 	// version which are additions and compatible with
 	// prior version of the same schema.
-	dbVersion = 2
+	dbVersion = 3
 )
 
 // DB represents a metadata database backed by a bolt

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/gc.go

@@ -23,10 +23,10 @@ import (
 	"strings"
 	"time"
 
-	"github.com/boltdb/bolt"
 	"github.com/containerd/containerd/gc"
 	"github.com/containerd/containerd/log"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 const (

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/images.go

@@ -23,7 +23,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/boltdb/bolt"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/filters"
 	"github.com/containerd/containerd/images"
@@ -33,6 +32,7 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 type imageStore struct {

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/leases.go

@@ -20,7 +20,6 @@ import (
 	"context"
 	"time"
 
-	"github.com/boltdb/bolt"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/filters"
 	"github.com/containerd/containerd/leases"
@@ -28,6 +27,7 @@ import (
 	"github.com/containerd/containerd/namespaces"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 // LeaseManager manages the create/delete lifecycle of leases

+ 13 - 1
vendor/github.com/containerd/containerd/metadata/migrations.go

@@ -16,7 +16,7 @@
 
 package metadata
 
-import "github.com/boltdb/bolt"
+import bolt "go.etcd.io/bbolt"
 
 type migration struct {
 	schema  string
@@ -45,6 +45,11 @@ var migrations = []migration{
 		version: 2,
 		migrate: migrateIngests,
 	},
+	{
+		schema:  "v1",
+		version: 3,
+		migrate: noOpMigration,
+	},
 }
 
 // addChildLinks Adds children key to the snapshotters to enforce snapshot
@@ -154,3 +159,10 @@ func migrateIngests(tx *bolt.Tx) error {
 
 	return nil
 }
+
+// noOpMigration covers the move from boltdb/bolt, which is no longer
+// maintained, to go.etcd.io/bbolt, the currently maintained fork; the
+// schema itself did not change.
+func noOpMigration(tx *bolt.Tx) error {
+	return nil
+}
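For context, a hedged in-package sketch of how such a version table is typically applied on open (the actual loop lives in metadata/db.go, which this hunk does not touch): every entry newer than the stored schema version runs inside the same write transaction, after which the stored version is bumped to dbVersion.

// illustrative only; field names match the migration struct above,
// and errors is github.com/pkg/errors as used elsewhere in this package
func applyMigrations(tx *bolt.Tx, stored int) error {
	for _, m := range migrations {
		if m.version <= stored {
			continue // already applied
		}
		if err := m.migrate(tx); err != nil {
			return errors.Wrapf(err, "migrating to version %d", m.version)
		}
	}
	return nil
}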

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/namespaces.go

@@ -19,11 +19,11 @@ package metadata
 import (
 	"context"
 
-	"github.com/boltdb/bolt"
 	"github.com/containerd/containerd/errdefs"
 	l "github.com/containerd/containerd/labels"
 	"github.com/containerd/containerd/namespaces"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 type namespaceStore struct {

+ 1 - 1
vendor/github.com/containerd/containerd/metadata/snapshot.go

@@ -23,7 +23,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/boltdb/bolt"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/labels"
 	"github.com/containerd/containerd/log"
@@ -32,6 +31,7 @@ import (
 	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/snapshots"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 )
 
 type snapshotter struct {

+ 4 - 0
vendor/github.com/containerd/containerd/mount/mount_windows.go

@@ -32,6 +32,10 @@ var (
 
 // Mount to the provided target
 func (m *Mount) Mount(target string) error {
+	if m.Type != "windows-layer" {
+		return errors.Errorf("invalid windows mount type: '%s'", m.Type)
+	}
+
 	home, layerID := filepath.Split(m.Source)
 
 	parentLayerPaths, err := m.GetParentPaths()

+ 213 - 6
vendor/github.com/containerd/containerd/oci/spec.go

@@ -18,11 +18,27 @@ package oci
 
 import (
 	"context"
+	"path/filepath"
+	"runtime"
+
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/platforms"
 
 	"github.com/containerd/containerd/containers"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
+const (
+	rwm               = "rwm"
+	defaultRootfsPath = "rootfs"
+)
+
+var (
+	defaultUnixEnv = []string{
+		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+	}
+)
+
 // Spec is a type alias to the OCI runtime spec to allow third-party SpecOpts
 // to be created without the "issues" with go vendoring and package imports
 type Spec = specs.Spec
@@ -30,12 +46,36 @@ type Spec = specs.Spec
 // GenerateSpec will generate a default spec from the provided image
 // for use as a containerd container
 func GenerateSpec(ctx context.Context, client Client, c *containers.Container, opts ...SpecOpts) (*Spec, error) {
-	s, err := createDefaultSpec(ctx, c.ID)
-	if err != nil {
+	return GenerateSpecWithPlatform(ctx, client, platforms.DefaultString(), c, opts...)
+}
+
+// GenerateSpecWithPlatform will generate a default spec from the provided image
+// for use as a containerd container in the platform requested.
+func GenerateSpecWithPlatform(ctx context.Context, client Client, platform string, c *containers.Container, opts ...SpecOpts) (*Spec, error) {
+	var s Spec
+	if err := generateDefaultSpecWithPlatform(ctx, platform, c.ID, &s); err != nil {
 		return nil, err
 	}
 
-	return s, ApplyOpts(ctx, client, c, s, opts...)
+	return &s, ApplyOpts(ctx, client, c, &s, opts...)
+}
+
+func generateDefaultSpecWithPlatform(ctx context.Context, platform, id string, s *Spec) error {
+	plat, err := platforms.Parse(platform)
+	if err != nil {
+		return err
+	}
+
+	if plat.OS == "windows" {
+		err = populateDefaultWindowsSpec(ctx, s, id)
+	} else {
+		err = populateDefaultUnixSpec(ctx, s, id)
+		if err == nil && runtime.GOOS == "windows" {
+			// To run LCOW we have a Linux and Windows section. Add an empty one now.
+			s.Windows = &specs.Windows{}
+		}
+	}
+	return err
 }
 
 // ApplyOpts applies the options to the given spec, injecting data from the
@@ -50,7 +90,174 @@ func ApplyOpts(ctx context.Context, client Client, c *containers.Container, s *S
 	return nil
 }
 
-func createDefaultSpec(ctx context.Context, id string) (*Spec, error) {
-	var s Spec
-	return &s, populateDefaultSpec(ctx, &s, id)
+func defaultUnixCaps() []string {
+	return []string{
+		"CAP_CHOWN",
+		"CAP_DAC_OVERRIDE",
+		"CAP_FSETID",
+		"CAP_FOWNER",
+		"CAP_MKNOD",
+		"CAP_NET_RAW",
+		"CAP_SETGID",
+		"CAP_SETUID",
+		"CAP_SETFCAP",
+		"CAP_SETPCAP",
+		"CAP_NET_BIND_SERVICE",
+		"CAP_SYS_CHROOT",
+		"CAP_KILL",
+		"CAP_AUDIT_WRITE",
+	}
+}
+
+func defaultUnixNamespaces() []specs.LinuxNamespace {
+	return []specs.LinuxNamespace{
+		{
+			Type: specs.PIDNamespace,
+		},
+		{
+			Type: specs.IPCNamespace,
+		},
+		{
+			Type: specs.UTSNamespace,
+		},
+		{
+			Type: specs.MountNamespace,
+		},
+		{
+			Type: specs.NetworkNamespace,
+		},
+	}
+}
+
+func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	*s = Spec{
+		Version: specs.Version,
+		Root: &specs.Root{
+			Path: defaultRootfsPath,
+		},
+		Process: &specs.Process{
+			Env:             defaultUnixEnv,
+			Cwd:             "/",
+			NoNewPrivileges: true,
+			User: specs.User{
+				UID: 0,
+				GID: 0,
+			},
+			Capabilities: &specs.LinuxCapabilities{
+				Bounding:    defaultUnixCaps(),
+				Permitted:   defaultUnixCaps(),
+				Inheritable: defaultUnixCaps(),
+				Effective:   defaultUnixCaps(),
+			},
+			Rlimits: []specs.POSIXRlimit{
+				{
+					Type: "RLIMIT_NOFILE",
+					Hard: uint64(1024),
+					Soft: uint64(1024),
+				},
+			},
+		},
+		Mounts: []specs.Mount{
+			{
+				Destination: "/proc",
+				Type:        "proc",
+				Source:      "proc",
+				Options:     []string{"nosuid", "noexec", "nodev"},
+			},
+			{
+				Destination: "/dev",
+				Type:        "tmpfs",
+				Source:      "tmpfs",
+				Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
+			},
+			{
+				Destination: "/dev/pts",
+				Type:        "devpts",
+				Source:      "devpts",
+				Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
+			},
+			{
+				Destination: "/dev/shm",
+				Type:        "tmpfs",
+				Source:      "shm",
+				Options:     []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"},
+			},
+			{
+				Destination: "/dev/mqueue",
+				Type:        "mqueue",
+				Source:      "mqueue",
+				Options:     []string{"nosuid", "noexec", "nodev"},
+			},
+			{
+				Destination: "/sys",
+				Type:        "sysfs",
+				Source:      "sysfs",
+				Options:     []string{"nosuid", "noexec", "nodev", "ro"},
+			},
+			{
+				Destination: "/run",
+				Type:        "tmpfs",
+				Source:      "tmpfs",
+				Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
+			},
+		},
+		Linux: &specs.Linux{
+			MaskedPaths: []string{
+				"/proc/acpi",
+				"/proc/kcore",
+				"/proc/keys",
+				"/proc/latency_stats",
+				"/proc/timer_list",
+				"/proc/timer_stats",
+				"/proc/sched_debug",
+				"/sys/firmware",
+				"/proc/scsi",
+			},
+			ReadonlyPaths: []string{
+				"/proc/asound",
+				"/proc/bus",
+				"/proc/fs",
+				"/proc/irq",
+				"/proc/sys",
+				"/proc/sysrq-trigger",
+			},
+			CgroupsPath: filepath.Join("/", ns, id),
+			Resources: &specs.LinuxResources{
+				Devices: []specs.LinuxDeviceCgroup{
+					{
+						Allow:  false,
+						Access: rwm,
+					},
+				},
+			},
+			Namespaces: defaultUnixNamespaces(),
+		},
+	}
+	return nil
+}
+
+func populateDefaultWindowsSpec(ctx context.Context, s *Spec, id string) error {
+	*s = Spec{
+		Version: specs.Version,
+		Root:    &specs.Root{},
+		Process: &specs.Process{
+			Cwd: `C:\`,
+			ConsoleSize: &specs.Box{
+				Width:  80,
+				Height: 20,
+			},
+		},
+		Windows: &specs.Windows{
+			IgnoreFlushesDuringBoot: true,
+			Network: &specs.WindowsNetwork{
+				AllowUnqualifiedDNSQuery: true,
+			},
+		},
+	}
+	return nil
 }
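A usage sketch for the new platform-aware entry point, e.g. requesting a Linux spec from a Windows daemon for LCOW (arguments are illustrative):

package example

import (
	"context"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
)

func linuxSpec(ctx context.Context, client oci.Client, c *containers.Container) (*oci.Spec, error) {
	return oci.GenerateSpecWithPlatform(ctx, client, "linux/amd64", c,
		oci.WithProcessArgs("/bin/sh"),
	)
}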

+ 881 - 33
vendor/github.com/containerd/containerd/oci/spec_opts.go

@@ -19,12 +19,25 @@ package oci
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
 	"strings"
 
 	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/continuity/fs"
+	"github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/opencontainers/runc/libcontainer/user"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
+	"github.com/syndtr/gocapability/capability"
 )
 
 // SpecOpts sets spec specific information to a newly generated OCI spec
@@ -49,13 +62,45 @@ func setProcess(s *Spec) {
 	}
 }
 
+// setRoot sets Root to empty if unset
+func setRoot(s *Spec) {
+	if s.Root == nil {
+		s.Root = &specs.Root{}
+	}
+}
+
+// setLinux sets Linux to empty if unset
+func setLinux(s *Spec) {
+	if s.Linux == nil {
+		s.Linux = &specs.Linux{}
+	}
+}
+
+// setCapabilities sets Linux Capabilities to empty if unset
+func setCapabilities(s *Spec) {
+	setProcess(s)
+	if s.Process.Capabilities == nil {
+		s.Process.Capabilities = &specs.LinuxCapabilities{}
+	}
+}
+
 // WithDefaultSpec returns a SpecOpts that will populate the spec with default
 // values.
 //
 // Use as the first option to clear the spec, then apply options afterwards.
 func WithDefaultSpec() SpecOpts {
 	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
-		return populateDefaultSpec(ctx, s, c.ID)
+		return generateDefaultSpecWithPlatform(ctx, platforms.DefaultString(), c.ID, s)
+	}
+}
+
+// WithDefaultSpecForPlatform returns a SpecOpts that will populate the spec
+// with default values for a given platform.
+//
+// Use as the first option to clear the spec, then apply options afterwards.
+func WithDefaultSpecForPlatform(platform string) SpecOpts {
+	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+		return generateDefaultSpecWithPlatform(ctx, platform, c.ID, s)
 	}
 }
 
@@ -81,6 +126,55 @@ func WithSpecFromFile(filename string) SpecOpts {
 	}
 }
 
+// WithEnv appends environment variables
+func WithEnv(environmentVariables []string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		if len(environmentVariables) > 0 {
+			setProcess(s)
+			s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, environmentVariables)
+		}
+		return nil
+	}
+}
+
+// replaceOrAppendEnvValues returns the defaults with the overrides either
+// replaced by env key or appended to the list
+func replaceOrAppendEnvValues(defaults, overrides []string) []string {
+	cache := make(map[string]int, len(defaults))
+	for i, e := range defaults {
+		parts := strings.SplitN(e, "=", 2)
+		cache[parts[0]] = i
+	}
+
+	for _, value := range overrides {
+		// Values w/o = means they want this env to be removed/unset.
+		if !strings.Contains(value, "=") {
+			if i, exists := cache[value]; exists {
+				defaults[i] = "" // Used to indicate it should be removed
+			}
+			continue
+		}
+
+		// Just do a normal set/update
+		parts := strings.SplitN(value, "=", 2)
+		if i, exists := cache[parts[0]]; exists {
+			defaults[i] = value
+		} else {
+			defaults = append(defaults, value)
+		}
+	}
+
+	// Now remove all entries that we want to "unset"
+	for i := 0; i < len(defaults); i++ {
+		if defaults[i] == "" {
+			defaults = append(defaults[:i], defaults[i+1:]...)
+			i--
+		}
+	}
+
+	return defaults
+}
+
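Since replaceOrAppendEnvValues is unexported, its semantics are easiest to observe through WithEnv; a small sketch (nil stands in for the unused Client and Container arguments):

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/containerd/containerd/oci"
    	specs "github.com/opencontainers/runtime-spec/specs-go"
    )

    func main() {
    	s := oci.Spec{Process: &specs.Process{Env: []string{"A=1", "B=2"}}}
    	// "A=9" replaces A, the bare "B" unsets B, and "C=3" is appended.
    	if err := oci.WithEnv([]string{"A=9", "B", "C=3"})(context.Background(), nil, nil, &s); err != nil {
    		panic(err)
    	}
    	fmt.Println(s.Process.Env) // [A=9 C=3]
    }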
 // WithProcessArgs replaces the args on the generated spec
 func WithProcessArgs(args ...string) SpecOpts {
 	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
@@ -99,6 +193,32 @@ func WithProcessCwd(cwd string) SpecOpts {
 	}
 }
 
+// WithTTY sets the information on the spec as well as the environment variables for
+// using a TTY
+func WithTTY(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	setProcess(s)
+	s.Process.Terminal = true
+	if s.Linux != nil {
+		s.Process.Env = append(s.Process.Env, "TERM=xterm")
+	}
+
+	return nil
+}
+
+// WithTTYSize sets the information on the spec as well as the environment variables for
+// using a TTY
+func WithTTYSize(width, height int) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setProcess(s)
+		if s.Process.ConsoleSize == nil {
+			s.Process.ConsoleSize = &specs.Box{}
+		}
+		s.Process.ConsoleSize.Width = uint(width)
+		s.Process.ConsoleSize.Height = uint(height)
+		return nil
+	}
+}
+
 // WithHostname sets the container's hostname
 func WithHostname(name string) SpecOpts {
 	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
@@ -107,59 +227,787 @@ func WithHostname(name string) SpecOpts {
 	}
 }
 
-// WithEnv appends environment variables
-func WithEnv(environmentVariables []string) SpecOpts {
+// WithMounts appends mounts
+func WithMounts(mounts []specs.Mount) SpecOpts {
 	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		if len(environmentVariables) > 0 {
-			setProcess(s)
-			s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, environmentVariables)
+		s.Mounts = append(s.Mounts, mounts...)
+		return nil
+	}
+}
+
+// WithHostNamespace allows a task to run inside the host's linux namespace
+func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setLinux(s)
+		for i, n := range s.Linux.Namespaces {
+			if n.Type == ns {
+				s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...)
+				return nil
+			}
 		}
 		return nil
 	}
 }
 
-// WithMounts appends mounts
-func WithMounts(mounts []specs.Mount) SpecOpts {
+// WithLinuxNamespace uses the passed in namespace for the spec. If a namespace of the same type already exists in the
+// spec, the existing namespace is replaced by the one provided.
+func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts {
 	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		s.Mounts = append(s.Mounts, mounts...)
+		setLinux(s)
+		for i, n := range s.Linux.Namespaces {
+			if n.Type == ns.Type {
+				before := s.Linux.Namespaces[:i]
+				after := s.Linux.Namespaces[i+1:]
+				s.Linux.Namespaces = append(before, ns)
+				s.Linux.Namespaces = append(s.Linux.Namespaces, after...)
+				return nil
+			}
+		}
+		s.Linux.Namespaces = append(s.Linux.Namespaces, ns)
 		return nil
 	}
 }
 
-// replaceOrAppendEnvValues returns the defaults with the overrides either
-// replaced by env key or appended to the list
-func replaceOrAppendEnvValues(defaults, overrides []string) []string {
-	cache := make(map[string]int, len(defaults))
-	for i, e := range defaults {
-		parts := strings.SplitN(e, "=", 2)
-		cache[parts[0]] = i
+// WithNewPrivileges turns off the NoNewPrivileges feature flag in the spec
+func WithNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	setProcess(s)
+	s.Process.NoNewPrivileges = false
+
+	return nil
+}
+
+// WithImageConfig configures the spec from the configuration of an Image
+func WithImageConfig(image Image) SpecOpts {
+	return WithImageConfigArgs(image, nil)
+}
+
+// WithImageConfigArgs configures the spec from the configuration of an Image, with additional args that
+// replace the CMD of the image
+func WithImageConfigArgs(image Image, args []string) SpecOpts {
+	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
+		ic, err := image.Config(ctx)
+		if err != nil {
+			return err
+		}
+		var (
+			ociimage v1.Image
+			config   v1.ImageConfig
+		)
+		switch ic.MediaType {
+		case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
+			p, err := content.ReadBlob(ctx, image.ContentStore(), ic)
+			if err != nil {
+				return err
+			}
+
+			if err := json.Unmarshal(p, &ociimage); err != nil {
+				return err
+			}
+			config = ociimage.Config
+		default:
+			return fmt.Errorf("unknown image config media type %s", ic.MediaType)
+		}
+
+		setProcess(s)
+		if s.Linux != nil {
+			s.Process.Env = append(s.Process.Env, config.Env...)
+			cmd := config.Cmd
+			if len(args) > 0 {
+				cmd = args
+			}
+			s.Process.Args = append(config.Entrypoint, cmd...)
+
+			cwd := config.WorkingDir
+			if cwd == "" {
+				cwd = "/"
+			}
+			s.Process.Cwd = cwd
+			if config.User != "" {
+				if err := WithUser(config.User)(ctx, client, c, s); err != nil {
+					return err
+				}
+				return WithAdditionalGIDs(fmt.Sprintf("%d", s.Process.User.UID))(ctx, client, c, s)
+			}
+			// we should query the image's /etc/group for additional GIDs
+			// even if there is no specified user in the image config
+			return WithAdditionalGIDs("root")(ctx, client, c, s)
+		} else if s.Windows != nil {
+			s.Process.Env = config.Env
+			s.Process.Args = append(config.Entrypoint, config.Cmd...)
+			s.Process.User = specs.User{
+				Username: config.User,
+			}
+		} else {
+			return errors.New("spec does not contain Linux or Windows section")
+		}
+		return nil
 	}
+}
 
-	for _, value := range overrides {
-		// Values w/o = means they want this env to be removed/unset.
-		if !strings.Contains(value, "=") {
-			if i, exists := cache[value]; exists {
-				defaults[i] = "" // Used to indicate it should be removed
+// WithRootFSPath specifies unmanaged rootfs path.
+func WithRootFSPath(path string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setRoot(s)
+		s.Root.Path = path
+		// Entrypoint is not set here (it's up to caller)
+		return nil
+	}
+}
+
+// WithRootFSReadonly sets specs.Root.Readonly to true
+func WithRootFSReadonly() SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setRoot(s)
+		s.Root.Readonly = true
+		return nil
+	}
+}
+
+// WithNoNewPrivileges sets no_new_privileges on the process for the container
+func WithNoNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	setProcess(s)
+	s.Process.NoNewPrivileges = true
+	return nil
+}
+
+// WithHostHostsFile bind-mounts the host's /etc/hosts into the container as readonly
+func WithHostHostsFile(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	s.Mounts = append(s.Mounts, specs.Mount{
+		Destination: "/etc/hosts",
+		Type:        "bind",
+		Source:      "/etc/hosts",
+		Options:     []string{"rbind", "ro"},
+	})
+	return nil
+}
+
+// WithHostResolvconf bind-mounts the host's /etc/resolv.conf into the container as readonly
+func WithHostResolvconf(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	s.Mounts = append(s.Mounts, specs.Mount{
+		Destination: "/etc/resolv.conf",
+		Type:        "bind",
+		Source:      "/etc/resolv.conf",
+		Options:     []string{"rbind", "ro"},
+	})
+	return nil
+}
+
+// WithHostLocaltime bind-mounts the host's /etc/localtime into the container as readonly
+func WithHostLocaltime(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	s.Mounts = append(s.Mounts, specs.Mount{
+		Destination: "/etc/localtime",
+		Type:        "bind",
+		Source:      "/etc/localtime",
+		Options:     []string{"rbind", "ro"},
+	})
+	return nil
+}
+
+// WithUserNamespace sets the uid and gid mappings for the task
+// this can be called multiple times to add more mappings to the generated spec
+func WithUserNamespace(container, host, size uint32) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		var hasUserns bool
+		setLinux(s)
+		for _, ns := range s.Linux.Namespaces {
+			if ns.Type == specs.UserNamespace {
+				hasUserns = true
+				break
 			}
-			continue
 		}
+		if !hasUserns {
+			s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{
+				Type: specs.UserNamespace,
+			})
+		}
+		mapping := specs.LinuxIDMapping{
+			ContainerID: container,
+			HostID:      host,
+			Size:        size,
+		}
+		s.Linux.UIDMappings = append(s.Linux.UIDMappings, mapping)
+		s.Linux.GIDMappings = append(s.Linux.GIDMappings, mapping)
+		return nil
+	}
+}
 
-		// Just do a normal set/update
-		parts := strings.SplitN(value, "=", 2)
-		if i, exists := cache[parts[0]]; exists {
-			defaults[i] = value
+// WithCgroup sets the container's cgroup path
+func WithCgroup(path string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setLinux(s)
+		s.Linux.CgroupsPath = path
+		return nil
+	}
+}
+
+// WithNamespacedCgroup uses the namespace set on the context to create a
+// root directory for containers in the cgroup with the id as the subcgroup
+func WithNamespacedCgroup() SpecOpts {
+	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
+		namespace, err := namespaces.NamespaceRequired(ctx)
+		if err != nil {
+			return err
+		}
+		setLinux(s)
+		s.Linux.CgroupsPath = filepath.Join("/", namespace, c.ID)
+		return nil
+	}
+}
+
+// WithUser sets the user to be used within the container.
+// It accepts a valid user string in OCI Image Spec v1.0.0:
+//   user, uid, user:group, uid:gid, uid:group, user:gid
+func WithUser(userstr string) SpecOpts {
+	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
+		setProcess(s)
+		parts := strings.Split(userstr, ":")
+		switch len(parts) {
+		case 1:
+			v, err := strconv.Atoi(parts[0])
+			if err != nil {
+			// if we cannot parse as a uint, try to see if it is a username
+				return WithUsername(userstr)(ctx, client, c, s)
+			}
+			return WithUserID(uint32(v))(ctx, client, c, s)
+		case 2:
+			var (
+				username  string
+				groupname string
+			)
+			var uid, gid uint32
+			v, err := strconv.Atoi(parts[0])
+			if err != nil {
+				username = parts[0]
+			} else {
+				uid = uint32(v)
+			}
+			if v, err = strconv.Atoi(parts[1]); err != nil {
+				groupname = parts[1]
+			} else {
+				gid = uint32(v)
+			}
+			if username == "" && groupname == "" {
+				s.Process.User.UID, s.Process.User.GID = uid, gid
+				return nil
+			}
+			f := func(root string) error {
+				if username != "" {
+					user, err := getUserFromPath(root, func(u user.User) bool {
+						return u.Name == username
+					})
+					if err != nil {
+						return err
+					}
+					uid = uint32(user.Uid)
+				}
+				if groupname != "" {
+					gid, err = getGIDFromPath(root, func(g user.Group) bool {
+						return g.Name == groupname
+					})
+					if err != nil {
+						return err
+					}
+				}
+				s.Process.User.UID, s.Process.User.GID = uid, gid
+				return nil
+			}
+			if c.Snapshotter == "" && c.SnapshotKey == "" {
+				if !isRootfsAbs(s.Root.Path) {
+					return errors.New("rootfs absolute path is required")
+				}
+				return f(s.Root.Path)
+			}
+			if c.Snapshotter == "" {
+				return errors.New("no snapshotter set for container")
+			}
+			if c.SnapshotKey == "" {
+				return errors.New("rootfs snapshot not created for container")
+			}
+			snapshotter := client.SnapshotService(c.Snapshotter)
+			mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
+			if err != nil {
+				return err
+			}
+			return mount.WithTempMount(ctx, mounts, f)
+		default:
+			return fmt.Errorf("invalid USER value %s", userstr)
+		}
+	}
+}
+
+// WithUIDGID allows the UID and GID for the Process to be set
+func WithUIDGID(uid, gid uint32) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setProcess(s)
+		s.Process.User.UID = uid
+		s.Process.User.GID = gid
+		return nil
+	}
+}
+
+// WithUserID sets the correct UID and GID for the container based
+// on the image's /etc/passwd contents. If /etc/passwd does not exist,
+// or uid is not found in /etc/passwd, it sets the requested uid,
+// additionally sets the gid to 0, and does not return an error.
+func WithUserID(uid uint32) SpecOpts {
+	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) {
+		setProcess(s)
+		if c.Snapshotter == "" && c.SnapshotKey == "" {
+			if !isRootfsAbs(s.Root.Path) {
+				return errors.Errorf("rootfs absolute path is required")
+			}
+			user, err := getUserFromPath(s.Root.Path, func(u user.User) bool {
+				return u.Uid == int(uid)
+			})
+			if err != nil {
+				if os.IsNotExist(err) || err == errNoUsersFound {
+					s.Process.User.UID, s.Process.User.GID = uid, 0
+					return nil
+				}
+				return err
+			}
+			s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid)
+			return nil
+
+		}
+		if c.Snapshotter == "" {
+			return errors.Errorf("no snapshotter set for container")
+		}
+		if c.SnapshotKey == "" {
+			return errors.Errorf("rootfs snapshot not created for container")
+		}
+		snapshotter := client.SnapshotService(c.Snapshotter)
+		mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
+		if err != nil {
+			return err
+		}
+		return mount.WithTempMount(ctx, mounts, func(root string) error {
+			user, err := getUserFromPath(root, func(u user.User) bool {
+				return u.Uid == int(uid)
+			})
+			if err != nil {
+				if os.IsNotExist(err) || err == errNoUsersFound {
+					s.Process.User.UID, s.Process.User.GID = uid, 0
+					return nil
+				}
+				return err
+			}
+			s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid)
+			return nil
+		})
+	}
+}
+
+// WithUsername sets the correct UID and GID for the container
+// based on the image's /etc/passwd contents. If /etc/passwd
+// does not exist, or the username is not found in /etc/passwd,
+// it returns an error.
+func WithUsername(username string) SpecOpts {
+	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) {
+		setProcess(s)
+		if s.Linux != nil {
+			if c.Snapshotter == "" && c.SnapshotKey == "" {
+				if !isRootfsAbs(s.Root.Path) {
+					return errors.Errorf("rootfs absolute path is required")
+				}
+				user, err := getUserFromPath(s.Root.Path, func(u user.User) bool {
+					return u.Name == username
+				})
+				if err != nil {
+					return err
+				}
+				s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid)
+				return nil
+			}
+			if c.Snapshotter == "" {
+				return errors.Errorf("no snapshotter set for container")
+			}
+			if c.SnapshotKey == "" {
+				return errors.Errorf("rootfs snapshot not created for container")
+			}
+			snapshotter := client.SnapshotService(c.Snapshotter)
+			mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
+			if err != nil {
+				return err
+			}
+			return mount.WithTempMount(ctx, mounts, func(root string) error {
+				user, err := getUserFromPath(root, func(u user.User) bool {
+					return u.Name == username
+				})
+				if err != nil {
+					return err
+				}
+				s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid)
+				return nil
+			})
+		} else if s.Windows != nil {
+			s.Process.User.Username = username
 		} else {
-			defaults = append(defaults, value)
+			return errors.New("spec does not contain Linux or Windows section")
 		}
+		return nil
 	}
+}
 
-	// Now remove all entries that we want to "unset"
-	for i := 0; i < len(defaults); i++ {
-		if defaults[i] == "" {
-			defaults = append(defaults[:i], defaults[i+1:]...)
-			i--
+// WithAdditionalGIDs sets the OCI spec's additionalGids array to any additional groups listed
+// for a particular user in the /etc/group file of the image's root filesystem.
+// The passed-in user can be either a uid or a username.
+func WithAdditionalGIDs(userstr string) SpecOpts {
+	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) {
+		setProcess(s)
+		setAdditionalGids := func(root string) error {
+			var username string
+			uid, err := strconv.Atoi(userstr)
+			if err == nil {
+				user, err := getUserFromPath(root, func(u user.User) bool {
+					return u.Uid == uid
+				})
+				if err != nil {
+					if os.IsNotExist(err) || err == errNoUsersFound {
+						return nil
+					}
+					return err
+				}
+				username = user.Name
+			} else {
+				username = userstr
+			}
+			gids, err := getSupplementalGroupsFromPath(root, func(g user.Group) bool {
+				// we only want supplemental groups
+				if g.Name == username {
+					return false
+				}
+				for _, entry := range g.List {
+					if entry == username {
+						return true
+					}
+				}
+				return false
+			})
+			if err != nil {
+				if os.IsNotExist(err) {
+					return nil
+				}
+				return err
+			}
+			s.Process.User.AdditionalGids = gids
+			return nil
 		}
+		if c.Snapshotter == "" && c.SnapshotKey == "" {
+			if !isRootfsAbs(s.Root.Path) {
+				return errors.Errorf("rootfs absolute path is required")
+			}
+			return setAdditionalGids(s.Root.Path)
+		}
+		if c.Snapshotter == "" {
+			return errors.Errorf("no snapshotter set for container")
+		}
+		if c.SnapshotKey == "" {
+			return errors.Errorf("rootfs snapshot not created for container")
+		}
+		snapshotter := client.SnapshotService(c.Snapshotter)
+		mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
+		if err != nil {
+			return err
+		}
+		return mount.WithTempMount(ctx, mounts, setAdditionalGids)
 	}
+}
 
-	return defaults
+// WithCapabilities sets Linux capabilities on the process
+func WithCapabilities(caps []string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setCapabilities(s)
+
+		s.Process.Capabilities.Bounding = caps
+		s.Process.Capabilities.Effective = caps
+		s.Process.Capabilities.Permitted = caps
+		s.Process.Capabilities.Inheritable = caps
+
+		return nil
+	}
+}
+
+// WithAllCapabilities sets all linux capabilities for the process
+var WithAllCapabilities = WithCapabilities(getAllCapabilities())
+
+func getAllCapabilities() []string {
+	last := capability.CAP_LAST_CAP
+	// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+	if last == capability.Cap(63) {
+		last = capability.CAP_BLOCK_SUSPEND
+	}
+	var caps []string
+	for _, cap := range capability.List() {
+		if cap > last {
+			continue
+		}
+		caps = append(caps, "CAP_"+strings.ToUpper(cap.String()))
+	}
+	return caps
+}
+
+// WithAmbientCapabilities sets the Linux ambient capabilities for the process.
+// Ambient capabilities should only be set for non-root users; otherwise the
+// caller should understand how these capabilities are used and set.
+func WithAmbientCapabilities(caps []string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setCapabilities(s)
+
+		s.Process.Capabilities.Ambient = caps
+		return nil
+	}
+}
+
+var errNoUsersFound = errors.New("no users found")
+
+func getUserFromPath(root string, filter func(user.User) bool) (user.User, error) {
+	ppath, err := fs.RootPath(root, "/etc/passwd")
+	if err != nil {
+		return user.User{}, err
+	}
+	users, err := user.ParsePasswdFileFilter(ppath, filter)
+	if err != nil {
+		return user.User{}, err
+	}
+	if len(users) == 0 {
+		return user.User{}, errNoUsersFound
+	}
+	return users[0], nil
+}
+
+var errNoGroupsFound = errors.New("no groups found")
+
+func getGIDFromPath(root string, filter func(user.Group) bool) (gid uint32, err error) {
+	gpath, err := fs.RootPath(root, "/etc/group")
+	if err != nil {
+		return 0, err
+	}
+	groups, err := user.ParseGroupFileFilter(gpath, filter)
+	if err != nil {
+		return 0, err
+	}
+	if len(groups) == 0 {
+		return 0, errNoGroupsFound
+	}
+	g := groups[0]
+	return uint32(g.Gid), nil
+}
+
+func getSupplementalGroupsFromPath(root string, filter func(user.Group) bool) ([]uint32, error) {
+	gpath, err := fs.RootPath(root, "/etc/group")
+	if err != nil {
+		return []uint32{}, err
+	}
+	groups, err := user.ParseGroupFileFilter(gpath, filter)
+	if err != nil {
+		return []uint32{}, err
+	}
+	if len(groups) == 0 {
+		// if there are no additional groups; just return an empty set
+		return []uint32{}, nil
+	}
+	addlGids := []uint32{}
+	for _, grp := range groups {
+		addlGids = append(addlGids, uint32(grp.Gid))
+	}
+	return addlGids, nil
+}
+
+func isRootfsAbs(root string) bool {
+	return filepath.IsAbs(root)
 }
+
+// WithMaskedPaths sets the masked paths option
+func WithMaskedPaths(paths []string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setLinux(s)
+		s.Linux.MaskedPaths = paths
+		return nil
+	}
+}
+
+// WithReadonlyPaths sets the read only paths option
+func WithReadonlyPaths(paths []string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setLinux(s)
+		s.Linux.ReadonlyPaths = paths
+		return nil
+	}
+}
+
+// WithWriteableSysfs makes any sysfs mounts writeable
+func WithWriteableSysfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	for i, m := range s.Mounts {
+		if m.Type == "sysfs" {
+			var options []string
+			for _, o := range m.Options {
+				if o == "ro" {
+					o = "rw"
+				}
+				options = append(options, o)
+			}
+			s.Mounts[i].Options = options
+		}
+	}
+	return nil
+}
+
+// WithWriteableCgroupfs makes any cgroup mounts writeable
+func WithWriteableCgroupfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	for i, m := range s.Mounts {
+		if m.Type == "cgroup" {
+			var options []string
+			for _, o := range m.Options {
+				if o == "ro" {
+					o = "rw"
+				}
+				options = append(options, o)
+			}
+			s.Mounts[i].Options = options
+		}
+	}
+	return nil
+}
+
+// WithSelinuxLabel sets the process SELinux label
+func WithSelinuxLabel(label string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setProcess(s)
+		s.Process.SelinuxLabel = label
+		return nil
+	}
+}
+
+// WithApparmorProfile sets the Apparmor profile for the process
+func WithApparmorProfile(profile string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setProcess(s)
+		s.Process.ApparmorProfile = profile
+		return nil
+	}
+}
+
+// WithSeccompUnconfined clears the seccomp profile
+func WithSeccompUnconfined(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	setLinux(s)
+	s.Linux.Seccomp = nil
+	return nil
+}
+
+// WithParentCgroupDevices uses the default cgroup setup to inherit the container's parent cgroup's
+// allowed and denied devices
+func WithParentCgroupDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	setLinux(s)
+	if s.Linux.Resources == nil {
+		s.Linux.Resources = &specs.LinuxResources{}
+	}
+	s.Linux.Resources.Devices = nil
+	return nil
+}
+
+// WithDefaultUnixDevices adds the default devices for unix such as /dev/null, /dev/random to
+// the container's resource cgroup spec
+func WithDefaultUnixDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	setLinux(s)
+	if s.Linux.Resources == nil {
+		s.Linux.Resources = &specs.LinuxResources{}
+	}
+	intptr := func(i int64) *int64 {
+		return &i
+	}
+	s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, []specs.LinuxDeviceCgroup{
+		{
+			// "/dev/null",
+			Type:   "c",
+			Major:  intptr(1),
+			Minor:  intptr(3),
+			Access: rwm,
+			Allow:  true,
+		},
+		{
+			// "/dev/random",
+			Type:   "c",
+			Major:  intptr(1),
+			Minor:  intptr(8),
+			Access: rwm,
+			Allow:  true,
+		},
+		{
+			// "/dev/full",
+			Type:   "c",
+			Major:  intptr(1),
+			Minor:  intptr(7),
+			Access: rwm,
+			Allow:  true,
+		},
+		{
+			// "/dev/tty",
+			Type:   "c",
+			Major:  intptr(5),
+			Minor:  intptr(0),
+			Access: rwm,
+			Allow:  true,
+		},
+		{
+			// "/dev/zero",
+			Type:   "c",
+			Major:  intptr(1),
+			Minor:  intptr(5),
+			Access: rwm,
+			Allow:  true,
+		},
+		{
+			// "/dev/urandom",
+			Type:   "c",
+			Major:  intptr(1),
+			Minor:  intptr(9),
+			Access: rwm,
+			Allow:  true,
+		},
+		{
+			// "/dev/console",
+			Type:   "c",
+			Major:  intptr(5),
+			Minor:  intptr(1),
+			Access: rwm,
+			Allow:  true,
+		},
+		// /dev/pts/ - pts namespaces are "coming soon"
+		{
+			Type:   "c",
+			Major:  intptr(136),
+			Access: rwm,
+			Allow:  true,
+		},
+		{
+			Type:   "c",
+			Major:  intptr(5),
+			Minor:  intptr(2),
+			Access: rwm,
+			Allow:  true,
+		},
+		{
+			// tuntap
+			Type:   "c",
+			Major:  intptr(10),
+			Minor:  intptr(200),
+			Access: rwm,
+			Allow:  true,
+		},
+	}...)
+	return nil
+}
+
+// WithPrivileged sets up options for a privileged container
+// TODO(justincormack) device handling
+var WithPrivileged = Compose(
+	WithAllCapabilities,
+	WithMaskedPaths(nil),
+	WithReadonlyPaths(nil),
+	WithWriteableSysfs,
+	WithWriteableCgroupfs,
+	WithSelinuxLabel(""),
+	WithApparmorProfile(""),
+	WithSeccompUnconfined,
+)
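These options compose left to right through containerd.WithNewSpec, so later options can override earlier ones. A sketch against a running daemon; the socket path, namespace, and image reference are placeholders, and error handling is abbreviated:

    package main

    import (
    	"context"

    	"github.com/containerd/containerd"
    	"github.com/containerd/containerd/namespaces"
    	"github.com/containerd/containerd/oci"
    	specs "github.com/opencontainers/runtime-spec/specs-go"
    )

    func main() {
    	client, err := containerd.New("/run/containerd/containerd.sock")
    	if err != nil {
    		panic(err)
    	}
    	defer client.Close()
    	ctx := namespaces.WithNamespace(context.Background(), "default")

    	image, err := client.Pull(ctx, "docker.io/library/alpine:latest", containerd.WithPullUnpack)
    	if err != nil {
    		panic(err)
    	}

    	// WithImageConfig seeds env/args/cwd/user from the image; the
    	// following options refine the result in order.
    	container, err := client.NewContainer(ctx, "example",
    		containerd.WithNewSnapshot("example-rootfs", image),
    		containerd.WithNewSpec(
    			oci.WithImageConfig(image),
    			oci.WithEnv([]string{"DEBUG=1"}),
    			oci.WithHostNamespace(specs.NetworkNamespace), // run in the host network namespace
    		),
    	)
    	if err != nil {
    		panic(err)
    	}
    	defer container.Delete(ctx, containerd.WithSnapshotCleanup)
    }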

+ 0 - 733
vendor/github.com/containerd/containerd/oci/spec_opts_unix.go

@@ -1,733 +0,0 @@
-// +build !windows
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package oci
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"github.com/containerd/containerd/containers"
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/namespaces"
-	"github.com/containerd/continuity/fs"
-	"github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/opencontainers/runc/libcontainer/user"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/pkg/errors"
-	"github.com/syndtr/gocapability/capability"
-)
-
-// WithTTY sets the information on the spec as well as the environment variables for
-// using a TTY
-func WithTTY(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	setProcess(s)
-	s.Process.Terminal = true
-	s.Process.Env = append(s.Process.Env, "TERM=xterm")
-	return nil
-}
-
-// setRoot sets Root to empty if unset
-func setRoot(s *Spec) {
-	if s.Root == nil {
-		s.Root = &specs.Root{}
-	}
-}
-
-// setLinux sets Linux to empty if unset
-func setLinux(s *Spec) {
-	if s.Linux == nil {
-		s.Linux = &specs.Linux{}
-	}
-}
-
-// setCapabilities sets Linux Capabilities to empty if unset
-func setCapabilities(s *Spec) {
-	setProcess(s)
-	if s.Process.Capabilities == nil {
-		s.Process.Capabilities = &specs.LinuxCapabilities{}
-	}
-}
-
-// WithHostNamespace allows a task to run inside the host's linux namespace
-func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setLinux(s)
-		for i, n := range s.Linux.Namespaces {
-			if n.Type == ns {
-				s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...)
-				return nil
-			}
-		}
-		return nil
-	}
-}
-
-// WithLinuxNamespace uses the passed in namespace for the spec. If a namespace of the same type already exists in the
-// spec, the existing namespace is replaced by the one provided.
-func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setLinux(s)
-		for i, n := range s.Linux.Namespaces {
-			if n.Type == ns.Type {
-				before := s.Linux.Namespaces[:i]
-				after := s.Linux.Namespaces[i+1:]
-				s.Linux.Namespaces = append(before, ns)
-				s.Linux.Namespaces = append(s.Linux.Namespaces, after...)
-				return nil
-			}
-		}
-		s.Linux.Namespaces = append(s.Linux.Namespaces, ns)
-		return nil
-	}
-}
-
-// WithImageConfig configures the spec to from the configuration of an Image
-func WithImageConfig(image Image) SpecOpts {
-	return WithImageConfigArgs(image, nil)
-}
-
-// WithImageConfigArgs configures the spec to from the configuration of an Image with additional args that
-// replaces the CMD of the image
-func WithImageConfigArgs(image Image, args []string) SpecOpts {
-	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
-		ic, err := image.Config(ctx)
-		if err != nil {
-			return err
-		}
-		var (
-			ociimage v1.Image
-			config   v1.ImageConfig
-		)
-		switch ic.MediaType {
-		case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
-			p, err := content.ReadBlob(ctx, image.ContentStore(), ic)
-			if err != nil {
-				return err
-			}
-
-			if err := json.Unmarshal(p, &ociimage); err != nil {
-				return err
-			}
-			config = ociimage.Config
-		default:
-			return fmt.Errorf("unknown image config media type %s", ic.MediaType)
-		}
-
-		setProcess(s)
-		s.Process.Env = append(s.Process.Env, config.Env...)
-		cmd := config.Cmd
-		if len(args) > 0 {
-			cmd = args
-		}
-		s.Process.Args = append(config.Entrypoint, cmd...)
-
-		cwd := config.WorkingDir
-		if cwd == "" {
-			cwd = "/"
-		}
-		s.Process.Cwd = cwd
-		if config.User != "" {
-			return WithUser(config.User)(ctx, client, c, s)
-		}
-		return nil
-	}
-}
-
-// WithRootFSPath specifies unmanaged rootfs path.
-func WithRootFSPath(path string) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setRoot(s)
-		s.Root.Path = path
-		// Entrypoint is not set here (it's up to caller)
-		return nil
-	}
-}
-
-// WithRootFSReadonly sets specs.Root.Readonly to true
-func WithRootFSReadonly() SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setRoot(s)
-		s.Root.Readonly = true
-		return nil
-	}
-}
-
-// WithNoNewPrivileges sets no_new_privileges on the process for the container
-func WithNoNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	setProcess(s)
-	s.Process.NoNewPrivileges = true
-	return nil
-}
-
-// WithHostHostsFile bind-mounts the host's /etc/hosts into the container as readonly
-func WithHostHostsFile(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	s.Mounts = append(s.Mounts, specs.Mount{
-		Destination: "/etc/hosts",
-		Type:        "bind",
-		Source:      "/etc/hosts",
-		Options:     []string{"rbind", "ro"},
-	})
-	return nil
-}
-
-// WithHostResolvconf bind-mounts the host's /etc/resolv.conf into the container as readonly
-func WithHostResolvconf(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	s.Mounts = append(s.Mounts, specs.Mount{
-		Destination: "/etc/resolv.conf",
-		Type:        "bind",
-		Source:      "/etc/resolv.conf",
-		Options:     []string{"rbind", "ro"},
-	})
-	return nil
-}
-
-// WithHostLocaltime bind-mounts the host's /etc/localtime into the container as readonly
-func WithHostLocaltime(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	s.Mounts = append(s.Mounts, specs.Mount{
-		Destination: "/etc/localtime",
-		Type:        "bind",
-		Source:      "/etc/localtime",
-		Options:     []string{"rbind", "ro"},
-	})
-	return nil
-}
-
-// WithUserNamespace sets the uid and gid mappings for the task
-// this can be called multiple times to add more mappings to the generated spec
-func WithUserNamespace(container, host, size uint32) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		var hasUserns bool
-		setLinux(s)
-		for _, ns := range s.Linux.Namespaces {
-			if ns.Type == specs.UserNamespace {
-				hasUserns = true
-				break
-			}
-		}
-		if !hasUserns {
-			s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{
-				Type: specs.UserNamespace,
-			})
-		}
-		mapping := specs.LinuxIDMapping{
-			ContainerID: container,
-			HostID:      host,
-			Size:        size,
-		}
-		s.Linux.UIDMappings = append(s.Linux.UIDMappings, mapping)
-		s.Linux.GIDMappings = append(s.Linux.GIDMappings, mapping)
-		return nil
-	}
-}
-
-// WithCgroup sets the container's cgroup path
-func WithCgroup(path string) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setLinux(s)
-		s.Linux.CgroupsPath = path
-		return nil
-	}
-}
-
-// WithNamespacedCgroup uses the namespace set on the context to create a
-// root directory for containers in the cgroup with the id as the subcgroup
-func WithNamespacedCgroup() SpecOpts {
-	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
-		namespace, err := namespaces.NamespaceRequired(ctx)
-		if err != nil {
-			return err
-		}
-		setLinux(s)
-		s.Linux.CgroupsPath = filepath.Join("/", namespace, c.ID)
-		return nil
-	}
-}
-
-// WithUser sets the user to be used within the container.
-// It accepts a valid user string in OCI Image Spec v1.0.0:
-//   user, uid, user:group, uid:gid, uid:group, user:gid
-func WithUser(userstr string) SpecOpts {
-	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
-		setProcess(s)
-		parts := strings.Split(userstr, ":")
-		switch len(parts) {
-		case 1:
-			v, err := strconv.Atoi(parts[0])
-			if err != nil {
-				// if we cannot parse as a uint they try to see if it is a username
-				return WithUsername(userstr)(ctx, client, c, s)
-			}
-			return WithUserID(uint32(v))(ctx, client, c, s)
-		case 2:
-			var (
-				username  string
-				groupname string
-			)
-			var uid, gid uint32
-			v, err := strconv.Atoi(parts[0])
-			if err != nil {
-				username = parts[0]
-			} else {
-				uid = uint32(v)
-			}
-			if v, err = strconv.Atoi(parts[1]); err != nil {
-				groupname = parts[1]
-			} else {
-				gid = uint32(v)
-			}
-			if username == "" && groupname == "" {
-				s.Process.User.UID, s.Process.User.GID = uid, gid
-				return nil
-			}
-			f := func(root string) error {
-				if username != "" {
-					uid, _, err = getUIDGIDFromPath(root, func(u user.User) bool {
-						return u.Name == username
-					})
-					if err != nil {
-						return err
-					}
-				}
-				if groupname != "" {
-					gid, err = getGIDFromPath(root, func(g user.Group) bool {
-						return g.Name == groupname
-					})
-					if err != nil {
-						return err
-					}
-				}
-				s.Process.User.UID, s.Process.User.GID = uid, gid
-				return nil
-			}
-			if c.Snapshotter == "" && c.SnapshotKey == "" {
-				if !isRootfsAbs(s.Root.Path) {
-					return errors.New("rootfs absolute path is required")
-				}
-				return f(s.Root.Path)
-			}
-			if c.Snapshotter == "" {
-				return errors.New("no snapshotter set for container")
-			}
-			if c.SnapshotKey == "" {
-				return errors.New("rootfs snapshot not created for container")
-			}
-			snapshotter := client.SnapshotService(c.Snapshotter)
-			mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
-			if err != nil {
-				return err
-			}
-			return mount.WithTempMount(ctx, mounts, f)
-		default:
-			return fmt.Errorf("invalid USER value %s", userstr)
-		}
-	}
-}
-
-// WithUIDGID allows the UID and GID for the Process to be set
-func WithUIDGID(uid, gid uint32) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setProcess(s)
-		s.Process.User.UID = uid
-		s.Process.User.GID = gid
-		return nil
-	}
-}
-
-// WithUserID sets the correct UID and GID for the container based
-// on the image's /etc/passwd contents. If /etc/passwd does not exist,
-// or uid is not found in /etc/passwd, it sets the requested uid,
-// additionally sets the gid to 0, and does not return an error.
-func WithUserID(uid uint32) SpecOpts {
-	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) {
-		setProcess(s)
-		if c.Snapshotter == "" && c.SnapshotKey == "" {
-			if !isRootfsAbs(s.Root.Path) {
-				return errors.Errorf("rootfs absolute path is required")
-			}
-			uuid, ugid, err := getUIDGIDFromPath(s.Root.Path, func(u user.User) bool {
-				return u.Uid == int(uid)
-			})
-			if err != nil {
-				if os.IsNotExist(err) || err == errNoUsersFound {
-					s.Process.User.UID, s.Process.User.GID = uid, 0
-					return nil
-				}
-				return err
-			}
-			s.Process.User.UID, s.Process.User.GID = uuid, ugid
-			return nil
-
-		}
-		if c.Snapshotter == "" {
-			return errors.Errorf("no snapshotter set for container")
-		}
-		if c.SnapshotKey == "" {
-			return errors.Errorf("rootfs snapshot not created for container")
-		}
-		snapshotter := client.SnapshotService(c.Snapshotter)
-		mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
-		if err != nil {
-			return err
-		}
-		return mount.WithTempMount(ctx, mounts, func(root string) error {
-			uuid, ugid, err := getUIDGIDFromPath(root, func(u user.User) bool {
-				return u.Uid == int(uid)
-			})
-			if err != nil {
-				if os.IsNotExist(err) || err == errNoUsersFound {
-					s.Process.User.UID, s.Process.User.GID = uid, 0
-					return nil
-				}
-				return err
-			}
-			s.Process.User.UID, s.Process.User.GID = uuid, ugid
-			return nil
-		})
-	}
-}
-
-// WithUsername sets the correct UID and GID for the container
-// based on the the image's /etc/passwd contents. If /etc/passwd
-// does not exist, or the username is not found in /etc/passwd,
-// it returns error.
-func WithUsername(username string) SpecOpts {
-	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) {
-		setProcess(s)
-		if c.Snapshotter == "" && c.SnapshotKey == "" {
-			if !isRootfsAbs(s.Root.Path) {
-				return errors.Errorf("rootfs absolute path is required")
-			}
-			uid, gid, err := getUIDGIDFromPath(s.Root.Path, func(u user.User) bool {
-				return u.Name == username
-			})
-			if err != nil {
-				return err
-			}
-			s.Process.User.UID, s.Process.User.GID = uid, gid
-			return nil
-		}
-		if c.Snapshotter == "" {
-			return errors.Errorf("no snapshotter set for container")
-		}
-		if c.SnapshotKey == "" {
-			return errors.Errorf("rootfs snapshot not created for container")
-		}
-		snapshotter := client.SnapshotService(c.Snapshotter)
-		mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
-		if err != nil {
-			return err
-		}
-		return mount.WithTempMount(ctx, mounts, func(root string) error {
-			uid, gid, err := getUIDGIDFromPath(root, func(u user.User) bool {
-				return u.Name == username
-			})
-			if err != nil {
-				return err
-			}
-			s.Process.User.UID, s.Process.User.GID = uid, gid
-			return nil
-		})
-	}
-}
-
-// WithCapabilities sets Linux capabilities on the process
-func WithCapabilities(caps []string) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setCapabilities(s)
-
-		s.Process.Capabilities.Bounding = caps
-		s.Process.Capabilities.Effective = caps
-		s.Process.Capabilities.Permitted = caps
-		s.Process.Capabilities.Inheritable = caps
-
-		return nil
-	}
-}
-
-// WithAllCapabilities sets all linux capabilities for the process
-var WithAllCapabilities = WithCapabilities(getAllCapabilities())
-
-func getAllCapabilities() []string {
-	last := capability.CAP_LAST_CAP
-	// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
-	if last == capability.Cap(63) {
-		last = capability.CAP_BLOCK_SUSPEND
-	}
-	var caps []string
-	for _, cap := range capability.List() {
-		if cap > last {
-			continue
-		}
-		caps = append(caps, "CAP_"+strings.ToUpper(cap.String()))
-	}
-	return caps
-}
-
-// WithAmbientCapabilities set the Linux ambient capabilities for the process
-// Ambient capabilities should only be set for non-root users or the caller should
-// understand how these capabilities are used and set
-func WithAmbientCapabilities(caps []string) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setCapabilities(s)
-
-		s.Process.Capabilities.Ambient = caps
-		return nil
-	}
-}
-
-var errNoUsersFound = errors.New("no users found")
-
-func getUIDGIDFromPath(root string, filter func(user.User) bool) (uid, gid uint32, err error) {
-	ppath, err := fs.RootPath(root, "/etc/passwd")
-	if err != nil {
-		return 0, 0, err
-	}
-	users, err := user.ParsePasswdFileFilter(ppath, filter)
-	if err != nil {
-		return 0, 0, err
-	}
-	if len(users) == 0 {
-		return 0, 0, errNoUsersFound
-	}
-	u := users[0]
-	return uint32(u.Uid), uint32(u.Gid), nil
-}
-
-var errNoGroupsFound = errors.New("no groups found")
-
-func getGIDFromPath(root string, filter func(user.Group) bool) (gid uint32, err error) {
-	gpath, err := fs.RootPath(root, "/etc/group")
-	if err != nil {
-		return 0, err
-	}
-	groups, err := user.ParseGroupFileFilter(gpath, filter)
-	if err != nil {
-		return 0, err
-	}
-	if len(groups) == 0 {
-		return 0, errNoGroupsFound
-	}
-	g := groups[0]
-	return uint32(g.Gid), nil
-}
-
-func isRootfsAbs(root string) bool {
-	return filepath.IsAbs(root)
-}
-
-// WithMaskedPaths sets the masked paths option
-func WithMaskedPaths(paths []string) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setLinux(s)
-		s.Linux.MaskedPaths = paths
-		return nil
-	}
-}
-
-// WithReadonlyPaths sets the read only paths option
-func WithReadonlyPaths(paths []string) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setLinux(s)
-		s.Linux.ReadonlyPaths = paths
-		return nil
-	}
-}
-
-// WithWriteableSysfs makes any sysfs mounts writeable
-func WithWriteableSysfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	for i, m := range s.Mounts {
-		if m.Type == "sysfs" {
-			var options []string
-			for _, o := range m.Options {
-				if o == "ro" {
-					o = "rw"
-				}
-				options = append(options, o)
-			}
-			s.Mounts[i].Options = options
-		}
-	}
-	return nil
-}
-
-// WithWriteableCgroupfs makes any cgroup mounts writeable
-func WithWriteableCgroupfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	for i, m := range s.Mounts {
-		if m.Type == "cgroup" {
-			var options []string
-			for _, o := range m.Options {
-				if o == "ro" {
-					o = "rw"
-				}
-				options = append(options, o)
-			}
-			s.Mounts[i].Options = options
-		}
-	}
-	return nil
-}
-
-// WithSelinuxLabel sets the process SELinux label
-func WithSelinuxLabel(label string) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setProcess(s)
-		s.Process.SelinuxLabel = label
-		return nil
-	}
-}
-
-// WithApparmorProfile sets the Apparmor profile for the process
-func WithApparmorProfile(profile string) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setProcess(s)
-		s.Process.ApparmorProfile = profile
-		return nil
-	}
-}
-
-// WithSeccompUnconfined clears the seccomp profile
-func WithSeccompUnconfined(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	setLinux(s)
-	s.Linux.Seccomp = nil
-	return nil
-}
-
-// WithParentCgroupDevices uses the default cgroup setup to inherit the container's parent cgroup's
-// allowed and denied devices
-func WithParentCgroupDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	setLinux(s)
-	if s.Linux.Resources == nil {
-		s.Linux.Resources = &specs.LinuxResources{}
-	}
-	s.Linux.Resources.Devices = nil
-	return nil
-}
-
-// WithDefaultUnixDevices adds the default devices for unix such as /dev/null, /dev/random to
-// the container's resource cgroup spec
-func WithDefaultUnixDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-	setLinux(s)
-	if s.Linux.Resources == nil {
-		s.Linux.Resources = &specs.LinuxResources{}
-	}
-	intptr := func(i int64) *int64 {
-		return &i
-	}
-	s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, []specs.LinuxDeviceCgroup{
-		{
-			// "/dev/null",
-			Type:   "c",
-			Major:  intptr(1),
-			Minor:  intptr(3),
-			Access: rwm,
-			Allow:  true,
-		},
-		{
-			// "/dev/random",
-			Type:   "c",
-			Major:  intptr(1),
-			Minor:  intptr(8),
-			Access: rwm,
-			Allow:  true,
-		},
-		{
-			// "/dev/full",
-			Type:   "c",
-			Major:  intptr(1),
-			Minor:  intptr(7),
-			Access: rwm,
-			Allow:  true,
-		},
-		{
-			// "/dev/tty",
-			Type:   "c",
-			Major:  intptr(5),
-			Minor:  intptr(0),
-			Access: rwm,
-			Allow:  true,
-		},
-		{
-			// "/dev/zero",
-			Type:   "c",
-			Major:  intptr(1),
-			Minor:  intptr(5),
-			Access: rwm,
-			Allow:  true,
-		},
-		{
-			// "/dev/urandom",
-			Type:   "c",
-			Major:  intptr(1),
-			Minor:  intptr(9),
-			Access: rwm,
-			Allow:  true,
-		},
-		{
-			// "/dev/console",
-			Type:   "c",
-			Major:  intptr(5),
-			Minor:  intptr(1),
-			Access: rwm,
-			Allow:  true,
-		},
-		// /dev/pts/ - pts namespaces are "coming soon"
-		{
-			Type:   "c",
-			Major:  intptr(136),
-			Access: rwm,
-			Allow:  true,
-		},
-		{
-			Type:   "c",
-			Major:  intptr(5),
-			Minor:  intptr(2),
-			Access: rwm,
-			Allow:  true,
-		},
-		{
-			// tuntap
-			Type:   "c",
-			Major:  intptr(10),
-			Minor:  intptr(200),
-			Access: rwm,
-			Allow:  true,
-		},
-	}...)
-	return nil
-}
-
-// WithPrivileged sets up options for a privileged container
-// TODO(justincormack) device handling
-var WithPrivileged = Compose(
-	WithAllCapabilities,
-	WithMaskedPaths(nil),
-	WithReadonlyPaths(nil),
-	WithWriteableSysfs,
-	WithWriteableCgroupfs,
-	WithSelinuxLabel(""),
-	WithApparmorProfile(""),
-	WithSeccompUnconfined,
-)
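With the unix-specific implementations folded into spec_opts.go above, the same options compile on every platform, and defaults for an explicit platform can be requested without build tags; a sketch (the nil client is fine because this option needs neither snapshots nor images):

    package main

    import (
    	"context"

    	"github.com/containerd/containerd/containers"
    	"github.com/containerd/containerd/namespaces"
    	"github.com/containerd/containerd/oci"
    )

    func main() {
    	ctx := namespaces.WithNamespace(context.Background(), "default")
    	var s oci.Spec
    	// Linux defaults are produced even when compiled on another OS.
    	if err := oci.WithDefaultSpecForPlatform("linux/amd64")(ctx, nil, &containers.Container{ID: "demo"}, &s); err != nil {
    		panic(err)
    	}
    }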

+ 0 - 89
vendor/github.com/containerd/containerd/oci/spec_opts_windows.go

@@ -1,89 +0,0 @@
-// +build windows
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package oci
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-
-	"github.com/containerd/containerd/containers"
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/images"
-	"github.com/opencontainers/image-spec/specs-go/v1"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// WithImageConfig configures the spec to from the configuration of an Image
-func WithImageConfig(image Image) SpecOpts {
-	return func(ctx context.Context, client Client, _ *containers.Container, s *Spec) error {
-		setProcess(s)
-		ic, err := image.Config(ctx)
-		if err != nil {
-			return err
-		}
-		var (
-			ociimage v1.Image
-			config   v1.ImageConfig
-		)
-		switch ic.MediaType {
-		case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
-			p, err := content.ReadBlob(ctx, image.ContentStore(), ic)
-			if err != nil {
-				return err
-			}
-			if err := json.Unmarshal(p, &ociimage); err != nil {
-				return err
-			}
-			config = ociimage.Config
-		default:
-			return fmt.Errorf("unknown image config media type %s", ic.MediaType)
-		}
-		s.Process.Env = config.Env
-		s.Process.Args = append(config.Entrypoint, config.Cmd...)
-		s.Process.User = specs.User{
-			Username: config.User,
-		}
-		return nil
-	}
-}
-
-// WithTTY sets the information on the spec as well as the environment variables for
-// using a TTY
-func WithTTY(width, height int) SpecOpts {
-	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
-		setProcess(s)
-		s.Process.Terminal = true
-		if s.Process.ConsoleSize == nil {
-			s.Process.ConsoleSize = &specs.Box{}
-		}
-		s.Process.ConsoleSize.Width = uint(width)
-		s.Process.ConsoleSize.Height = uint(height)
-		return nil
-	}
-}
-
-// WithUsername sets the username on the process
-func WithUsername(username string) SpecOpts {
-	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
-		setProcess(s)
-		s.Process.User.Username = username
-		return nil
-	}
-}

+ 0 - 188
vendor/github.com/containerd/containerd/oci/spec_unix.go

@@ -1,188 +0,0 @@
-// +build !windows
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package oci
-
-import (
-	"context"
-	"path/filepath"
-
-	"github.com/containerd/containerd/namespaces"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-const (
-	rwm               = "rwm"
-	defaultRootfsPath = "rootfs"
-)
-
-var (
-	defaultEnv = []string{
-		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-	}
-)
-
-func defaultCaps() []string {
-	return []string{
-		"CAP_CHOWN",
-		"CAP_DAC_OVERRIDE",
-		"CAP_FSETID",
-		"CAP_FOWNER",
-		"CAP_MKNOD",
-		"CAP_NET_RAW",
-		"CAP_SETGID",
-		"CAP_SETUID",
-		"CAP_SETFCAP",
-		"CAP_SETPCAP",
-		"CAP_NET_BIND_SERVICE",
-		"CAP_SYS_CHROOT",
-		"CAP_KILL",
-		"CAP_AUDIT_WRITE",
-	}
-}
-
-func defaultNamespaces() []specs.LinuxNamespace {
-	return []specs.LinuxNamespace{
-		{
-			Type: specs.PIDNamespace,
-		},
-		{
-			Type: specs.IPCNamespace,
-		},
-		{
-			Type: specs.UTSNamespace,
-		},
-		{
-			Type: specs.MountNamespace,
-		},
-		{
-			Type: specs.NetworkNamespace,
-		},
-	}
-}
-
-func populateDefaultSpec(ctx context.Context, s *Spec, id string) error {
-	ns, err := namespaces.NamespaceRequired(ctx)
-	if err != nil {
-		return err
-	}
-
-	*s = Spec{
-		Version: specs.Version,
-		Root: &specs.Root{
-			Path: defaultRootfsPath,
-		},
-		Process: &specs.Process{
-			Env:             defaultEnv,
-			Cwd:             "/",
-			NoNewPrivileges: true,
-			User: specs.User{
-				UID: 0,
-				GID: 0,
-			},
-			Capabilities: &specs.LinuxCapabilities{
-				Bounding:    defaultCaps(),
-				Permitted:   defaultCaps(),
-				Inheritable: defaultCaps(),
-				Effective:   defaultCaps(),
-			},
-			Rlimits: []specs.POSIXRlimit{
-				{
-					Type: "RLIMIT_NOFILE",
-					Hard: uint64(1024),
-					Soft: uint64(1024),
-				},
-			},
-		},
-		Mounts: []specs.Mount{
-			{
-				Destination: "/proc",
-				Type:        "proc",
-				Source:      "proc",
-			},
-			{
-				Destination: "/dev",
-				Type:        "tmpfs",
-				Source:      "tmpfs",
-				Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
-			},
-			{
-				Destination: "/dev/pts",
-				Type:        "devpts",
-				Source:      "devpts",
-				Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
-			},
-			{
-				Destination: "/dev/shm",
-				Type:        "tmpfs",
-				Source:      "shm",
-				Options:     []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"},
-			},
-			{
-				Destination: "/dev/mqueue",
-				Type:        "mqueue",
-				Source:      "mqueue",
-				Options:     []string{"nosuid", "noexec", "nodev"},
-			},
-			{
-				Destination: "/sys",
-				Type:        "sysfs",
-				Source:      "sysfs",
-				Options:     []string{"nosuid", "noexec", "nodev", "ro"},
-			},
-			{
-				Destination: "/run",
-				Type:        "tmpfs",
-				Source:      "tmpfs",
-				Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
-			},
-		},
-		Linux: &specs.Linux{
-			MaskedPaths: []string{
-				"/proc/acpi",
-				"/proc/kcore",
-				"/proc/keys",
-				"/proc/latency_stats",
-				"/proc/timer_list",
-				"/proc/timer_stats",
-				"/proc/sched_debug",
-				"/sys/firmware",
-				"/proc/scsi",
-			},
-			ReadonlyPaths: []string{
-				"/proc/asound",
-				"/proc/bus",
-				"/proc/fs",
-				"/proc/irq",
-				"/proc/sys",
-				"/proc/sysrq-trigger",
-			},
-			CgroupsPath: filepath.Join("/", ns, id),
-			Resources: &specs.LinuxResources{
-				Devices: []specs.LinuxDeviceCgroup{
-					{
-						Allow:  false,
-						Access: rwm,
-					},
-				},
-			},
-			Namespaces: defaultNamespaces(),
-		},
-	}
-	return nil
-}

+ 0 - 5
vendor/github.com/containerd/containerd/platforms/defaults.go

@@ -22,11 +22,6 @@ import (
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
-// Default returns the default matcher for the platform.
-func Default() MatchComparer {
-	return Only(DefaultSpec())
-}
-
 // DefaultString returns the default string specifier for the platform.
 func DefaultString() string {
 	return Format(DefaultSpec())

+ 6 - 26
vendor/github.com/containerd/containerd/oci/spec_windows.go → vendor/github.com/containerd/containerd/platforms/defaults_unix.go

@@ -1,3 +1,5 @@
+// +build !windows
+
 /*
    Copyright The containerd Authors.
 
@@ -14,31 +16,9 @@
    limitations under the License.
 */
 
-package oci
-
-import (
-	"context"
-
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-)
+package platforms
 
-func populateDefaultSpec(ctx context.Context, s *Spec, id string) error {
-	*s = Spec{
-		Version: specs.Version,
-		Root:    &specs.Root{},
-		Process: &specs.Process{
-			Cwd: `C:\`,
-			ConsoleSize: &specs.Box{
-				Width:  80,
-				Height: 20,
-			},
-		},
-		Windows: &specs.Windows{
-			IgnoreFlushesDuringBoot: true,
-			Network: &specs.WindowsNetwork{
-				AllowUnqualifiedDNSQuery: true,
-			},
-		},
-	}
-	return nil
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+	return Only(DefaultSpec())
 }

+ 10 - 10
vendor/github.com/containerd/containerd/task_opts_windows.go → vendor/github.com/containerd/containerd/platforms/defaults_windows.go

@@ -1,3 +1,5 @@
+// +build windows
+
 /*
    Copyright The containerd Authors.
 
@@ -14,18 +16,16 @@
    limitations under the License.
 */
 
-package containerd
+package platforms
 
 import (
-	"context"
-
-	specs "github.com/opencontainers/runtime-spec/specs-go"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
-// WithResources sets the provided resources on the spec for task updates
-func WithResources(resources *specs.WindowsResources) UpdateTaskOpts {
-	return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
-		r.Resources = resources
-		return nil
-	}
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+	return Ordered(DefaultSpec(), specs.Platform{
+		OS:           "linux",
+		Architecture: "amd64",
+	})
 }
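
With this change, Default() on Windows returns an Ordered comparer, so a Windows host can fall back to linux/amd64 images when no native match exists. A minimal sketch of how such a MatchComparer might be consumed, assuming a hypothetical candidates slice taken from an image index (bestMatches is not containerd API):

import (
	"sort"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// bestMatches filters candidate platforms through the default comparer and
// orders the survivors by the comparer's preference. Illustrative only.
func bestMatches(candidates []specs.Platform) []specs.Platform {
	m := platforms.Default()
	var matched []specs.Platform
	for _, p := range candidates {
		if m.Match(p) {
			matched = append(matched, p)
		}
	}
	sort.SliceStable(matched, func(i, j int) bool {
		return m.Less(matched[i], matched[j])
	})
	return matched
}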

+ 1 - 1
vendor/github.com/containerd/containerd/remotes/docker/fetcher.go

@@ -117,7 +117,7 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int
 			}
 		} else {
 			// TODO: Should any cases where use of content range
-			// without the proper header be considerd?
+			// without the proper header be considered?
 			// 206 responses?
 
 			// Discard up to offset

+ 1 - 1
vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go

@@ -134,7 +134,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
 		// There is an edge case here where offset == size of the content. If
 		// we seek, we will probably get an error for content that cannot be
 		// sought (?). In that case, we should err on committing the content,
-		// as the length is already satisified but we just return the empty
+		// as the length is already satisfied but we just return the empty
 		// reader instead.
 
 		hrs.rc = ioutil.NopCloser(bytes.NewReader([]byte{}))

+ 53 - 2
vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go

@@ -272,8 +272,14 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro
 			return err
 		}
 
-		// TODO: Check if blob -> diff id mapping already exists
-		// TODO: Check if blob empty label exists
+		reuse, err := c.reuseLabelBlobState(ctx, desc)
+		if err != nil {
+			return err
+		}
+
+		if reuse {
+			return nil
+		}
 
 		ra, err := c.contentStore.ReaderAt(ctx, desc)
 		if err != nil {
@@ -343,6 +349,17 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro
 
 	state := calc.State()
 
+	cinfo := content.Info{
+		Digest: desc.Digest,
+		Labels: map[string]string{
+			"containerd.io/uncompressed": state.diffID.String(),
+		},
+	}
+
+	if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil {
+		return errors.Wrap(err, "failed to update uncompressed label")
+	}
+
 	c.mu.Lock()
 	c.blobMap[desc.Digest] = state
 	c.layerBlobs[state.diffID] = desc
@@ -351,6 +368,40 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro
 	return nil
 }
 
+func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) {
+	cinfo, err := c.contentStore.Info(ctx, desc.Digest)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to get blob info")
+	}
+	desc.Size = cinfo.Size
+
+	diffID, ok := cinfo.Labels["containerd.io/uncompressed"]
+	if !ok {
+		return false, nil
+	}
+
+	bState := blobState{empty: false}
+
+	if bState.diffID, err = digest.Parse(diffID); err != nil {
+		log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label containerd.io/uncompressed: %v", diffID)
+		return false, nil
+	}
+
+	// NOTE: there is no need to read header to get compression method
+	// because there are only two kinds of methods.
+	if bState.diffID == desc.Digest {
+		desc.MediaType = images.MediaTypeDockerSchema2Layer
+	} else {
+		desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
+	}
+
+	c.mu.Lock()
+	c.blobMap[desc.Digest] = bState
+	c.layerBlobs[bState.diffID] = desc
+	c.mu.Unlock()
+	return true, nil
+}
+
 func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) {
 	if c.pulledManifest == nil {
 		return nil, nil, errors.New("missing schema 1 manifest for conversion")
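
The reuse path added above keys off the containerd.io/uncompressed label: when a blob's diff ID is already recorded in the content store, the converter skips re-reading the blob, and the media type follows from a single comparison because schema1 pulls only ever produce raw or gzipped layers. A sketch of that deduction, as a standalone helper rather than converter code:

import (
	"github.com/containerd/containerd/images"
	"github.com/opencontainers/go-digest"
)

// layerMediaType mirrors the check in reuseLabelBlobState: a layer whose
// diff ID equals its blob digest was stored uncompressed; anything else is
// treated as gzip. Illustrative only.
func layerMediaType(blob, diffID digest.Digest) string {
	if diffID == blob {
		return images.MediaTypeDockerSchema2Layer
	}
	return images.MediaTypeDockerSchema2LayerGzip
}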

+ 1 - 1
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec.go

@@ -147,7 +147,7 @@ func (e *execProcess) start(ctx context.Context) (err error) {
 			return errors.Wrap(err, "creating new NULL IO")
 		}
 	} else {
-		if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID); err != nil {
+		if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID, withConditionalIO(e.stdio)); err != nil {
 			return errors.Wrap(err, "failed to create runc io pipes")
 		}
 	}

+ 4 - 4
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/exec_state.go

@@ -60,11 +60,11 @@ func (s *execCreatedState) Start(ctx context.Context) error {
 }
 
 func (s *execCreatedState) Delete(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	if err := s.p.delete(ctx); err != nil {
 		return err
 	}
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
 	return s.transition("deleted")
 }
 
@@ -168,11 +168,11 @@ func (s *execStoppedState) Start(ctx context.Context) error {
 }
 
 func (s *execStoppedState) Delete(ctx context.Context) error {
-	s.p.mu.Lock()
-	defer s.p.mu.Unlock()
 	if err := s.p.delete(ctx); err != nil {
 		return err
 	}
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
 	return s.transition("deleted")
 }
 

+ 10 - 3
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/init.go

@@ -123,7 +123,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
 			return errors.Wrap(err, "creating new NULL IO")
 		}
 	} else {
-		if p.io, err = runc.NewPipeIO(p.IoUID, p.IoGID); err != nil {
+		if p.io, err = runc.NewPipeIO(p.IoUID, p.IoGID, withConditionalIO(p.stdio)); err != nil {
 			return errors.Wrap(err, "failed to create OCI runtime io pipes")
 		}
 	}
@@ -228,7 +228,7 @@ func (p *Init) Status(ctx context.Context) (string, error) {
 	defer p.mu.Unlock()
 	c, err := p.runtime.State(ctx, p.id)
 	if err != nil {
-		if os.IsNotExist(err) {
+		if strings.Contains(err.Error(), "does not exist") {
 			return "stopped", nil
 		}
 		return "", p.runtimeError(err, "OCI runtime state failed")
@@ -249,7 +249,6 @@ func (p *Init) setExited(status int) {
 }
 
 func (p *Init) delete(context context.Context) error {
-	p.KillAll(context)
 	p.wg.Wait()
 	err := p.runtime.Delete(context, p.id, nil)
 	// ignore errors if a runtime has already deleted the process
@@ -400,3 +399,11 @@ func (p *Init) runtimeError(rErr error, msg string) error {
 		return errors.Errorf("%s: %s", msg, rMsg)
 	}
 }
+
+func withConditionalIO(c proc.Stdio) runc.IOOpt {
+	return func(o *runc.IOOption) {
+		o.OpenStdin = c.Stdin != ""
+		o.OpenStdout = c.Stdout != ""
+		o.OpenStderr = c.Stderr != ""
+	}
+}
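
withConditionalIO is a go-runc functional option: rather than unconditionally opening all three pipes, NewPipeIO now only opens the streams the process stdio actually names. A sketch of the effect, with a hypothetical FIFO path:

// Only stdout is wired up, so only that pipe would be requested.
var opt runc.IOOption
withConditionalIO(proc.Stdio{Stdout: "/run/fifo/out"})(&opt)
// opt.OpenStdin == false, opt.OpenStdout == true, opt.OpenStderr == false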

+ 0 - 1
vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go

@@ -109,7 +109,6 @@ func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, w
 		i.dest(fw, fr)
 	}
 	if stdin == "" {
-		rio.Stdin().Close()
 		return nil
 	}
 	f, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)

+ 4 - 12
vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go

@@ -26,7 +26,6 @@ import (
 	"path/filepath"
 	"time"
 
-	"github.com/boltdb/bolt"
 	eventstypes "github.com/containerd/containerd/api/events"
 	"github.com/containerd/containerd/api/types"
 	"github.com/containerd/containerd/containers"
@@ -49,6 +48,7 @@ import (
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	bolt "go.etcd.io/bbolt"
 	"golang.org/x/sys/unix"
 )
 
@@ -204,7 +204,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts
 				log.G(ctx).WithError(err).WithFields(logrus.Fields{
 					"id":        id,
 					"namespace": namespace,
-				}).Warn("failed to clen up after killed shim")
+				}).Warn("failed to clean up after killed shim")
 			}
 		}
 		shimopt = ShimRemote(r.config, r.address, cgroup, exitHandler)
@@ -248,8 +248,7 @@ func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts
 	if err != nil {
 		return nil, errdefs.FromGRPC(err)
 	}
-	t, err := newTask(id, namespace, int(cr.Pid), s, r.events,
-		proc.NewRunc(ropts.RuntimeRoot, sopts.Bundle, namespace, rt, ropts.CriuPath, ropts.SystemdCgroup), r.tasks, bundle)
+	t, err := newTask(id, namespace, int(cr.Pid), s, r.events, r.tasks, bundle)
 	if err != nil {
 		return nil, err
 	}
@@ -341,15 +340,8 @@ func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) {
 			}
 			continue
 		}
-		ropts, err := r.getRuncOptions(ctx, id)
-		if err != nil {
-			log.G(ctx).WithError(err).WithField("id", id).
-				Error("get runtime options")
-			continue
-		}
 
-		t, err := newTask(id, ns, pid, s, r.events,
-			proc.NewRunc(ropts.RuntimeRoot, bundle.path, ns, ropts.Runtime, ropts.CriuPath, ropts.SystemdCgroup), r.tasks, bundle)
+		t, err := newTask(id, ns, pid, s, r.events, r.tasks, bundle)
 		if err != nil {
 			log.G(ctx).WithError(err).Error("loading task type")
 			continue

+ 2 - 3
vendor/github.com/containerd/containerd/runtime/v1/linux/task.go

@@ -31,8 +31,7 @@ import (
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/runtime"
 	"github.com/containerd/containerd/runtime/v1/shim/client"
-	shim "github.com/containerd/containerd/runtime/v1/shim/v1"
-	runc "github.com/containerd/go-runc"
+	"github.com/containerd/containerd/runtime/v1/shim/v1"
 	"github.com/containerd/ttrpc"
 	"github.com/containerd/typeurl"
 	"github.com/gogo/protobuf/types"
@@ -52,7 +51,7 @@ type Task struct {
 	bundle    *bundle
 }
 
-func newTask(id, namespace string, pid int, shim *client.Client, events *exchange.Exchange, runtime *runc.Runc, list *runtime.TaskList, bundle *bundle) (*Task, error) {
+func newTask(id, namespace string, pid int, shim *client.Client, events *exchange.Exchange, list *runtime.TaskList, bundle *bundle) (*Task, error) {
 	var (
 		err error
 		cg  cgroups.Cgroup

+ 40 - 7
vendor/github.com/containerd/containerd/runtime/v1/shim/service.go

@@ -20,7 +20,9 @@ package shim
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"
@@ -41,6 +43,7 @@ import (
 	runc "github.com/containerd/go-runc"
 	"github.com/containerd/typeurl"
 	ptypes "github.com/gogo/protobuf/types"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc/codes"
@@ -221,19 +224,21 @@ func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteR
 
 // DeleteProcess deletes an exec'd process
 func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
 	if r.ID == s.id {
 		return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess")
 	}
+	s.mu.Lock()
 	p := s.processes[r.ID]
+	s.mu.Unlock()
 	if p == nil {
 		return nil, errors.Wrapf(errdefs.ErrNotFound, "process %s", r.ID)
 	}
 	if err := p.Delete(ctx); err != nil {
 		return nil, err
 	}
+	s.mu.Lock()
 	delete(s.processes, r.ID)
+	s.mu.Unlock()
 	return &shimapi.DeleteResponse{
 		ExitStatus: uint32(p.ExitStatus()),
 		ExitedAt:   p.ExitedAt(),
@@ -507,13 +512,22 @@ func (s *Service) processExits() {
 func (s *Service) checkProcesses(e runc.Exit) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
+
+	shouldKillAll, err := shouldKillAllOnExit(s.bundle)
+	if err != nil {
+		log.G(s.context).WithError(err).Error("failed to check shouldKillAll")
+	}
+
 	for _, p := range s.processes {
 		if p.Pid() == e.Pid {
-			if ip, ok := p.(*proc.Init); ok {
-				// Ensure all children are killed
-				if err := ip.KillAll(s.context); err != nil {
-					log.G(s.context).WithError(err).WithField("id", ip.ID()).
-						Error("failed to kill init's children")
+
+			if shouldKillAll {
+				if ip, ok := p.(*proc.Init); ok {
+					// Ensure all children are killed
+					if err := ip.KillAll(s.context); err != nil {
+						log.G(s.context).WithError(err).WithField("id", ip.ID()).
+							Error("failed to kill init's children")
+					}
 				}
 			}
 			p.SetExited(e.Status)
@@ -529,6 +543,25 @@ func (s *Service) checkProcesses(e runc.Exit) {
 	}
 }
 
+func shouldKillAllOnExit(bundlePath string) (bool, error) {
+	var bundleSpec specs.Spec
+	bundleConfigContents, err := ioutil.ReadFile(filepath.Join(bundlePath, "config.json"))
+	if err != nil {
+		return false, err
+	}
+	if err := json.Unmarshal(bundleConfigContents, &bundleSpec); err != nil {
+		return false, err
+	}

+
+	if bundleSpec.Linux != nil {
+		for _, ns := range bundleSpec.Linux.Namespaces {
+			if ns.Type == specs.PIDNamespace {
+				return false, nil
+			}
+		}
+	}
+
+	return true, nil
+}
+
 func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
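
shouldKillAllOnExit narrows when the shim sweeps the whole process group: if the bundle's config.json gives the container a private PID namespace, the kernel already reaps every descendant once init exits, so KillAll is only needed when the host PID namespace is shared. A sketch of the spec shape the check looks for, using the runtime-spec specs package imported above:

// A config.json carrying this spec makes shouldKillAllOnExit return false.
spec := specs.Spec{
	Linux: &specs.Linux{
		Namespaces: []specs.LinuxNamespace{
			{Type: specs.PIDNamespace},
		},
	},
}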

+ 1 - 1
vendor/github.com/containerd/containerd/services/server/server.go

@@ -29,7 +29,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/boltdb/bolt"
 	csapi "github.com/containerd/containerd/api/services/content/v1"
 	ssapi "github.com/containerd/containerd/api/services/snapshots/v1"
 	"github.com/containerd/containerd/content"
@@ -46,6 +45,7 @@ import (
 	metrics "github.com/docker/go-metrics"
 	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
 	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
 	"google.golang.org/grpc"
 )
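
As in the runtime above, only the import path changes: the bolt alias keeps every identifier in the file intact, and bbolt preserves the boltdb API. A minimal sketch, with an illustrative path:

import bolt "go.etcd.io/bbolt"

// openMetaDB opens a metadata database exactly as the old
// github.com/boltdb/bolt call sites did; only the import moved.
func openMetaDB(path string) (*bolt.DB, error) {
	return bolt.Open(path, 0644, nil)
}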
 

+ 1 - 1
vendor/github.com/containerd/containerd/sys/socket_unix.go

@@ -42,7 +42,7 @@ func CreateUnixSocket(path string) (net.Listener, error) {
 	return net.Listen("unix", path)
 }
 
-// GetLocalListener returns a listerner out of a unix socket.
+// GetLocalListener returns a listener out of a unix socket.
 func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
 	// Ensure parent directory is created
 	if err := mkdirAs(filepath.Dir(path), uid, gid); err != nil {

+ 4 - 1
vendor/github.com/containerd/containerd/task.go

@@ -607,8 +607,11 @@ func writeContent(ctx context.Context, store content.Ingester, mediaType, ref st
 	if err != nil {
 		return d, err
 	}
+
 	if err := writer.Commit(ctx, size, "", opts...); err != nil {
-		return d, err
+		if !errdefs.IsAlreadyExists(err) {
+			return d, err
+		}
 	}
 	return v1.Descriptor{
 		MediaType: mediaType,

+ 62 - 0
vendor/github.com/containerd/containerd/task_opts.go

@@ -18,10 +18,18 @@ package containerd
 
 import (
 	"context"
+	"encoding/json"
+	"fmt"
 	"syscall"
 
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/mount"
+	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
 )
 
 // NewTaskOpts allows the caller to set options on a new task
@@ -35,6 +43,44 @@ func WithRootFS(mounts []mount.Mount) NewTaskOpts {
 	}
 }
 
+// WithTaskCheckpoint allows a task to be created with live runtime and memory data from a
+// previous checkpoint. Additional software such as CRIU may be required to
+// restore a task from a checkpoint
+func WithTaskCheckpoint(im Image) NewTaskOpts {
+	return func(ctx context.Context, c *Client, info *TaskInfo) error {
+		desc := im.Target()
+		id := desc.Digest
+		index, err := decodeIndex(ctx, c.ContentStore(), desc)
+		if err != nil {
+			return err
+		}
+		for _, m := range index.Manifests {
+			if m.MediaType == images.MediaTypeContainerd1Checkpoint {
+				info.Checkpoint = &types.Descriptor{
+					MediaType: m.MediaType,
+					Size_:     m.Size,
+					Digest:    m.Digest,
+				}
+				return nil
+			}
+		}
+		return fmt.Errorf("checkpoint not found in index %s", id)
+	}
+}
+
+func decodeIndex(ctx context.Context, store content.Provider, desc imagespec.Descriptor) (*imagespec.Index, error) {
+	var index imagespec.Index
+	p, err := content.ReadBlob(ctx, store, desc)
+	if err != nil {
+		return nil, err
+	}
+	if err := json.Unmarshal(p, &index); err != nil {
+		return nil, err
+	}
+
+	return &index, nil
+}
+
 // WithCheckpointName sets the image name for the checkpoint
 func WithCheckpointName(name string) CheckpointTaskOpts {
 	return func(r *CheckpointTaskInfo) error {
@@ -92,3 +138,19 @@ func WithKillExecID(execID string) KillOpts {
 		return nil
 	}
 }
+
+// WithResources sets the provided resources for task updates. Resources must be
+// either a *specs.LinuxResources or a *specs.WindowsResources
+func WithResources(resources interface{}) UpdateTaskOpts {
+	return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
+		switch resources.(type) {
+		case *specs.LinuxResources:
+		case *specs.WindowsResources:
+		default:
+			return errors.New("WithResources requires a *specs.LinuxResources or *specs.WindowsResources")
+		}
+
+		r.Resources = resources
+		return nil
+	}
+}
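
WithResources replaces the two per-GOOS variants with a single option validated at run time. A hypothetical update of a running task's memory limit, assuming task and ctx exist and specs is the runtime-spec package:

limit := int64(256 << 20) // 256 MiB, illustrative value
err := task.Update(ctx, containerd.WithResources(&specs.LinuxResources{
	Memory: &specs.LinuxMemory{Limit: &limit},
}))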

+ 19 - 10
vendor/github.com/containerd/containerd/task_opts_linux.go → vendor/github.com/containerd/containerd/task_opts_unix.go

@@ -1,3 +1,5 @@
+// +build !windows
+
 /*
    Copyright The containerd Authors.
 
@@ -18,20 +20,11 @@ package containerd
 
 import (
 	"context"
-	"errors"
 
 	"github.com/containerd/containerd/runtime/linux/runctypes"
-	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
 )
 
-// WithResources sets the provided resources for task updates
-func WithResources(resources *specs.LinuxResources) UpdateTaskOpts {
-	return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
-		r.Resources = resources
-		return nil
-	}
-}
-
 // WithNoNewKeyring causes tasks not to be created with a new keyring for secret storage.
 // There is an upper limit on the number of keyrings in a linux system
 func WithNoNewKeyring(ctx context.Context, c *Client, ti *TaskInfo) error {
@@ -46,3 +39,19 @@ func WithNoNewKeyring(ctx context.Context, c *Client, ti *TaskInfo) error {
 	opts.NoNewKeyring = true
 	return nil
 }
+
+// WithNoPivotRoot instructs the runtime not to use pivot_root
+func WithNoPivotRoot(_ context.Context, _ *Client, info *TaskInfo) error {
+	if info.Options == nil {
+		info.Options = &runctypes.CreateOptions{
+			NoPivotRoot: true,
+		}
+		return nil
+	}
+	opts, ok := info.Options.(*runctypes.CreateOptions)
+	if !ok {
+		return errors.New("invalid options type, expected runctypes.CreateOptions")
+	}
+	opts.NoPivotRoot = true
+	return nil
+}
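
A hypothetical call site for the new option, assuming container, ctx, and the cio stdio wiring already exist:

// Create the task without pivot_root, e.g. when the rootfs is an initramfs
// that cannot be pivoted away from.
task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio), containerd.WithNoPivotRoot)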

+ 21 - 20
vendor/github.com/containerd/containerd/vendor.conf

@@ -1,10 +1,10 @@
-github.com/containerd/go-runc acb7c88cac264acca9b5eae187a117f4d77a1292
+github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
 github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2
 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/btrfs 2e1aa0ddf94f91fa282b6ed87c23bf0d64911244
-github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
+github.com/containerd/continuity f44b615e492bdfb371aae2f76ec694d9da1db537
 github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
 github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
@@ -19,7 +19,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/gogo/protobuf v1.0.0
 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
 github.com/golang/protobuf v1.1.0
-github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce # v1.0.1-43-gd810dbc
+github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 # v1.0.1-45-geba862d
 github.com/opencontainers/runc 20aff4f0488c6d4b8df4d85b4f63f1f704c11abd
 github.com/sirupsen/logrus v1.0.0
 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
@@ -34,17 +34,17 @@ github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
 github.com/Microsoft/go-winio v0.4.10
 github.com/Microsoft/hcsshim 44c060121b68e8bdc40b411beba551f3b4ee9e55
-github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 github.com/containerd/ttrpc 94dde388801693c54f88a6596f713b51a8b30b2d
 github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
 gotest.tools v2.1.0
 github.com/google/go-cmp v0.1.0
+go.etcd.io/bbolt v1.3.1-etcd.8
 
 # cri dependencies
-github.com/containerd/cri v1.11.1
-github.com/containerd/go-cni 5882530828ecf62032409b298a3e8b19e08b6534
+github.com/containerd/cri 9f39e3289533fc228c5e5fcac0a6dbdd60c6047b # release/1.2 branch
+github.com/containerd/go-cni 6d7b509a054a3cb1c35ed1865d4fde2f0cb547cd
 github.com/blang/semver v3.1.0
 github.com/containernetworking/cni v0.6.0
 github.com/containernetworking/plugins v0.7.0
@@ -52,32 +52,33 @@ github.com/davecgh/go-spew v1.1.0
 github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621
 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
 github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
-github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
-github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
+github.com/emicklei/go-restful v2.2.1
+github.com/ghodss/yaml v1.0.0
 github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
 github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
-github.com/json-iterator/go f2b4162afba35581b6d4a50d3b8f34e33c144682
-github.com/modern-go/reflect2 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
+github.com/json-iterator/go 1.1.5
+github.com/modern-go/reflect2 1.0.1
 github.com/modern-go/concurrent 1.0.3
 github.com/opencontainers/runtime-tools v0.6.0
-github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206
+github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
-github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc
+github.com/tchap/go-patricia v2.2.6
 github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
 github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
 github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
 golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
+golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
-gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
-k8s.io/api 9e5ffd1f1320950b238cfce291b926411f0af722
-k8s.io/apimachinery ed135c5b96450fd24e5e981c708114fbbd950697
-k8s.io/apiserver a90e3a95c2e91b944bfca8225c4e0d12e42a9eb5
-k8s.io/client-go 03bfb9bdcfe5482795b999f39ca3ed9ad42ce5bb
-k8s.io/kubernetes v1.11.0
-k8s.io/utils 733eca437aa39379e4bcc25e726439dfca40fcff
+gopkg.in/yaml.v2 v2.2.1
+k8s.io/api 012f271b5d41baad56190c5f1ae19bff16df0fd8
+k8s.io/apimachinery 6429050ef506887d121f3e7306e894f8900d8a63
+k8s.io/apiserver e9312c15296b6c2c923ebd5031ff5d1d5fd022d7
+k8s.io/client-go 37c3c02ec96533daec0dbda1f39a6b1d68505c79
+k8s.io/kubernetes v1.12.0-beta.1
+k8s.io/utils 982821ea41da7e7c15f3d3738921eb2e7e241ccd
 
 # zfs dependencies
 github.com/containerd/zfs 9a0b8b8b5982014b729cd34eb7cd7a11062aa6ec
@@ -85,4 +86,4 @@ github.com/mistifyio/go-zfs 166add352731e515512690329794ee593f1aaff2
 github.com/pborman/uuid c65b2f87fee37d1c7854c9164a450713c28d50cd
 
 # aufs dependencies
-github.com/containerd/aufs a7fbd554da7a9eafbe5a460a421313a9fd18d988
+github.com/containerd/aufs ffa39970e26ad01d81f540b21e65f9c1841a5f92

+ 657 - 0
vendor/github.com/containerd/continuity/context.go

@@ -0,0 +1,657 @@
+package continuity
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containerd/continuity/devices"
+	driverpkg "github.com/containerd/continuity/driver"
+	"github.com/containerd/continuity/pathdriver"
+
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrNotFound represents the resource not found
+	ErrNotFound = fmt.Errorf("not found")
+	// ErrNotSupported represents the resource not supported
+	ErrNotSupported = fmt.Errorf("not supported")
+)
+
+// Context represents a file system context for accessing resources. The
+// responsibility of the context is to convert system specific resources to
+// generic Resource objects. Most of this is safe path manipulation, as well
+// as extraction of resource details.
+type Context interface {
+	Apply(Resource) error
+	Verify(Resource) error
+	Resource(string, os.FileInfo) (Resource, error)
+	Walk(filepath.WalkFunc) error
+}
+
+// SymlinkPath is intended to give the symlink target value
+// in a root context. Target and linkname are absolute paths
+// not under the given root.
+type SymlinkPath func(root, linkname, target string) (string, error)
+
+// ContextOptions represents options to create a new context.
+type ContextOptions struct {
+	Digester   Digester
+	Driver     driverpkg.Driver
+	PathDriver pathdriver.PathDriver
+	Provider   ContentProvider
+}
+
+// context represents a file system context for accessing resources.
+// Generally, all path qualified access and system considerations should land
+// here.
+type context struct {
+	driver     driverpkg.Driver
+	pathDriver pathdriver.PathDriver
+	root       string
+	digester   Digester
+	provider   ContentProvider
+}
+
+// NewContext returns a Context associated with root. The default driver will
+// be used, as returned by NewDriver.
+func NewContext(root string) (Context, error) {
+	return NewContextWithOptions(root, ContextOptions{})
+}
+
+// NewContextWithOptions returns a Context associated with the root.
+func NewContextWithOptions(root string, options ContextOptions) (Context, error) {
+	// normalize to absolute path
+	pathDriver := options.PathDriver
+	if pathDriver == nil {
+		pathDriver = pathdriver.LocalPathDriver
+	}
+
+	root = pathDriver.FromSlash(root)
+	root, err := pathDriver.Abs(pathDriver.Clean(root))
+	if err != nil {
+		return nil, err
+	}
+
+	driver := options.Driver
+	if driver == nil {
+		driver, err = driverpkg.NewSystemDriver()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	digester := options.Digester
+	if digester == nil {
+		digester = simpleDigester{digest.Canonical}
+	}
+
+	// Check the root directory. Need to be a little careful here. We are
+	// allowing a link for now, but this may have odd behavior when
+	// canonicalizing paths. As long as all files are opened through the link
+	// path, this should be okay.
+	fi, err := driver.Stat(root)
+	if err != nil {
+		return nil, err
+	}
+
+	if !fi.IsDir() {
+		return nil, &os.PathError{Op: "NewContext", Path: root, Err: os.ErrInvalid}
+	}
+
+	return &context{
+		root:       root,
+		driver:     driver,
+		pathDriver: pathDriver,
+		digester:   digester,
+		provider:   options.Provider,
+	}, nil
+}
+
+// Resource returns the resource at path p, populating the entry with info
+// from fi. The path p should be the path of the resource in the context,
+// typically obtained through Walk or from the value of Resource.Path(). If fi
+// is nil, it will be resolved.
+func (c *context) Resource(p string, fi os.FileInfo) (Resource, error) {
+	fp, err := c.fullpath(p)
+	if err != nil {
+		return nil, err
+	}
+
+	if fi == nil {
+		fi, err = c.driver.Lstat(fp)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	base, err := newBaseResource(p, fi)
+	if err != nil {
+		return nil, err
+	}
+
+	base.xattrs, err = c.resolveXAttrs(fp, fi, base)
+	if err == ErrNotSupported {
+		log.Printf("resolving xattrs on %s not supported", fp)
+	} else if err != nil {
+		return nil, err
+	}
+
+	// TODO(stevvooe): Handle windows alternate data streams.
+
+	if fi.Mode().IsRegular() {
+		dgst, err := c.digest(p)
+		if err != nil {
+			return nil, err
+		}
+
+		return newRegularFile(*base, base.paths, fi.Size(), dgst)
+	}
+
+	if fi.Mode().IsDir() {
+		return newDirectory(*base)
+	}
+
+	if fi.Mode()&os.ModeSymlink != 0 {
+		// We handle relative links vs absolute links by including a
+		// beginning slash for absolute links. Effectively, the bundle's
+		// root is treated as the absolute link anchor.
+		target, err := c.driver.Readlink(fp)
+		if err != nil {
+			return nil, err
+		}
+
+		return newSymLink(*base, target)
+	}
+
+	if fi.Mode()&os.ModeNamedPipe != 0 {
+		return newNamedPipe(*base, base.paths)
+	}
+
+	if fi.Mode()&os.ModeDevice != 0 {
+		deviceDriver, ok := c.driver.(driverpkg.DeviceInfoDriver)
+		if !ok {
+			log.Printf("device extraction not supported %s", fp)
+			return nil, ErrNotSupported
+		}
+
+		// character and block devices merely need to recover the
+		// major/minor device number.
+		major, minor, err := deviceDriver.DeviceInfo(fi)
+		if err != nil {
+			return nil, err
+		}
+
+		return newDevice(*base, base.paths, major, minor)
+	}
+
+	log.Printf("%q (%v) is not supported", fp, fi.Mode())
+	return nil, ErrNotFound
+}
+
+func (c *context) verifyMetadata(resource, target Resource) error {
+	if target.Mode() != resource.Mode() {
+		return fmt.Errorf("resource %q has incorrect mode: %v != %v", target.Path(), target.Mode(), resource.Mode())
+	}
+
+	if target.UID() != resource.UID() {
+		return fmt.Errorf("unexpected uid for %q: %v != %v", target.Path(), target.UID(), resource.UID())
+	}
+
+	if target.GID() != resource.GID() {
+		return fmt.Errorf("unexpected gid for %q: %v != %v", target.Path(), target.GID(), resource.GID())
+	}
+
+	if xattrer, ok := resource.(XAttrer); ok {
+		txattrer, tok := target.(XAttrer)
+		if !tok {
+			return fmt.Errorf("resource %q has xattrs but target does not support them", resource.Path())
+		}
+
+		// For xattrs, only ensure that we have those defined in the resource
+		// and their values match. We can ignore other xattrs. In other words,
+		// we only verify that target has the subset defined by resource.
+		txattrs := txattrer.XAttrs()
+		for attr, value := range xattrer.XAttrs() {
+			tvalue, ok := txattrs[attr]
+			if !ok {
+				return fmt.Errorf("resource %q target missing xattr %q", resource.Path(), attr)
+			}
+
+			if !bytes.Equal(value, tvalue) {
+				return fmt.Errorf("xattr %q value differs for resource %q", attr, resource.Path())
+			}
+		}
+	}
+
+	switch r := resource.(type) {
+	case RegularFile:
+		// TODO(stevvooe): Another reason to use a record-based approach. We
+		// have to do another type switch to get this to work. This could be
+		// fixed with an Equal function, but let's study this a little more to
+		// be sure.
+		t, ok := target.(RegularFile)
+		if !ok {
+			return fmt.Errorf("resource %q target not a regular file", r.Path())
+		}
+
+		if t.Size() != r.Size() {
+			return fmt.Errorf("resource %q target has incorrect size: %v != %v", t.Path(), t.Size(), r.Size())
+		}
+	case Directory:
+		_, ok := target.(Directory)
+		if !ok {
+			return fmt.Errorf("resource %q target not a directory", r.Path())
+		}
+	case SymLink:
+		t, ok := target.(SymLink)
+		if !ok {
+			return fmt.Errorf("resource %q target not a symlink", r.Path())
+		}
+
+		if t.Target() != r.Target() {
+			return fmt.Errorf("resource %q target has mismatched target: %q != %q", t.Path(), t.Target(), r.Target())
+		}
+	case Device:
+		t, ok := target.(Device)
+		if !ok {
+			return fmt.Errorf("resource %q is not a device", r.Path())
+		}
+
+		if t.Major() != r.Major() || t.Minor() != r.Minor() {
+			return fmt.Errorf("resource %q has mismatched major/minor numbers: %d,%d != %d,%d", t.Path(), t.Major(), t.Minor(), r.Major(), r.Minor())
+		}
+	case NamedPipe:
+		_, ok := target.(NamedPipe)
+		if !ok {
+			return fmt.Errorf("resource %q is not a named pipe", r.Path())
+		}
+	default:
+		return fmt.Errorf("cannot verify resource: %v", resource)
+	}
+
+	return nil
+}
+
+// Verify the resource in the context. An error will be returned if a
+// discrepancy is found.
+func (c *context) Verify(resource Resource) error {
+	fp, err := c.fullpath(resource.Path())
+	if err != nil {
+		return err
+	}
+
+	fi, err := c.driver.Lstat(fp)
+	if err != nil {
+		return err
+	}
+
+	target, err := c.Resource(resource.Path(), fi)
+	if err != nil {
+		return err
+	}
+
+	if target.Path() != resource.Path() {
+		return fmt.Errorf("resource paths do not match: %q != %q", target.Path(), resource.Path())
+	}
+
+	if err := c.verifyMetadata(resource, target); err != nil {
+		return err
+	}
+
+	if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable {
+		hardlinkKey, err := newHardlinkKey(fi)
+		if err == errNotAHardLink {
+			if len(h.Paths()) > 1 {
+				return fmt.Errorf("%q is not a hardlink to %q", h.Paths()[1], resource.Path())
+			}
+		} else if err != nil {
+			return err
+		}
+
+		for _, path := range h.Paths()[1:] {
+			fpLink, err := c.fullpath(path)
+			if err != nil {
+				return err
+			}
+
+			fiLink, err := c.driver.Lstat(fpLink)
+			if err != nil {
+				return err
+			}
+
+			targetLink, err := c.Resource(path, fiLink)
+			if err != nil {
+				return err
+			}
+
+			hardlinkKeyLink, err := newHardlinkKey(fiLink)
+			if err != nil {
+				return err
+			}
+
+			if hardlinkKeyLink != hardlinkKey {
+				return fmt.Errorf("%q is not a hardlink to %q", path, resource.Path())
+			}
+
+			if err := c.verifyMetadata(resource, targetLink); err != nil {
+				return err
+			}
+		}
+	}
+
+	switch r := resource.(type) {
+	case RegularFile:
+		t, ok := target.(RegularFile)
+		if !ok {
+			return fmt.Errorf("resource %q target not a regular file", r.Path())
+		}
+
+		// TODO(stevvooe): This may need to get a little more sophisticated
+		// for digest comparison. We may want to actually calculate the
+		// provided digests, rather than the implementations having an
+		// overlap.
+		if !digestsMatch(t.Digests(), r.Digests()) {
+			return fmt.Errorf("digests for resource %q do not match: %v != %v", t.Path(), t.Digests(), r.Digests())
+		}
+	}
+
+	return nil
+}
+
+func (c *context) checkoutFile(fp string, rf RegularFile) error {
+	if c.provider == nil {
+		return fmt.Errorf("no file provider")
+	}
+	var (
+		r   io.ReadCloser
+		err error
+	)
+	for _, dgst := range rf.Digests() {
+		r, err = c.provider.Reader(dgst)
+		if err == nil {
+			break
+		}
+	}
+	if err != nil {
+		return fmt.Errorf("file content could not be provided: %v", err)
+	}
+	defer r.Close()
+
+	return atomicWriteFile(fp, r, rf.Size(), rf.Mode())
+}
+
+// Apply the resource to the context. An error will be returned if the
+// operation fails. Depending on the resource type, the resource may be
+// created. For resources that cannot be resolved, an error will be returned.
+func (c *context) Apply(resource Resource) error {
+	fp, err := c.fullpath(resource.Path())
+	if err != nil {
+		return err
+	}
+
+	if !strings.HasPrefix(fp, c.root) {
+		return fmt.Errorf("resource %v escapes root", resource)
+	}
+
+	var chmod = true
+	fi, err := c.driver.Lstat(fp)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	switch r := resource.(type) {
+	case RegularFile:
+		if fi == nil {
+			if err := c.checkoutFile(fp, r); err != nil {
+				return fmt.Errorf("error checking out file %q: %v", resource.Path(), err)
+			}
+			chmod = false
+		} else {
+			if !fi.Mode().IsRegular() {
+				return fmt.Errorf("file %q should be a regular file, but is not", resource.Path())
+			}
+			if fi.Size() != r.Size() {
+				if err := c.checkoutFile(fp, r); err != nil {
+					return fmt.Errorf("error checking out file %q: %v", resource.Path(), err)
+				}
+			} else {
+				for _, dgst := range r.Digests() {
+					f, err := os.Open(fp)
+					if err != nil {
+						return fmt.Errorf("failure opening file for read %q: %v", resource.Path(), err)
+					}
+					compared, err := dgst.Algorithm().FromReader(f)
+					if err == nil && dgst != compared {
+						if err := c.checkoutFile(fp, r); err != nil {
+							return fmt.Errorf("error checking out file %q: %v", resource.Path(), err)
+						}
+						break
+					}
+					if err1 := f.Close(); err == nil {
+						err = err1
+					}
+					if err != nil {
+						return fmt.Errorf("error checking digest for %q: %v", resource.Path(), err)
+					}
+				}
+			}
+		}
+	case Directory:
+		if fi == nil {
+			if err := c.driver.Mkdir(fp, resource.Mode()); err != nil {
+				return err
+			}
+		} else if !fi.Mode().IsDir() {
+			return fmt.Errorf("%q should be a directory, but is not", resource.Path())
+		}
+
+	case SymLink:
+		var target string // only possibly set if target resource is a symlink
+
+		if fi != nil {
+			if fi.Mode()&os.ModeSymlink != 0 {
+				target, err = c.driver.Readlink(fp)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		if target != r.Target() {
+			if fi != nil {
+				if err := c.driver.Remove(fp); err != nil { // RemoveAll in case of directory?
+					return err
+				}
+			}
+
+			if err := c.driver.Symlink(r.Target(), fp); err != nil {
+				return err
+			}
+		}
+
+	case Device:
+		if fi == nil {
+			if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil {
+				return err
+			}
+		} else if (fi.Mode() & os.ModeDevice) == 0 {
+			return fmt.Errorf("%q should be a device, but is not", resource.Path())
+		} else {
+			major, minor, err := devices.DeviceInfo(fi)
+			if err != nil {
+				return err
+			}
+			if major != r.Major() || minor != r.Minor() {
+				if err := c.driver.Remove(fp); err != nil {
+					return err
+				}
+
+				if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil {
+					return err
+				}
+			}
+		}
+
+	case NamedPipe:
+		if fi == nil {
+			if err := c.driver.Mkfifo(fp, resource.Mode()); err != nil {
+				return err
+			}
+		} else if (fi.Mode() & os.ModeNamedPipe) == 0 {
+			return fmt.Errorf("%q should be a named pipe, but is not", resource.Path())
+		}
+	}
+
+	if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable {
+		for _, path := range h.Paths() {
+			if path == resource.Path() {
+				continue
+			}
+
+			lp, err := c.fullpath(path)
+			if err != nil {
+				return err
+			}
+
+			if _, err := c.driver.Lstat(lp); err == nil {
+				c.driver.Remove(lp)
+			}
+			if err := c.driver.Link(fp, lp); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Update filemode if file was not created
+	if chmod {
+		if err := c.driver.Lchmod(fp, resource.Mode()); err != nil {
+			return err
+		}
+	}
+
+	if err := c.driver.Lchown(fp, resource.UID(), resource.GID()); err != nil {
+		return err
+	}
+
+	if xattrer, ok := resource.(XAttrer); ok {
+		// For xattrs, only ensure that we have those defined in the resource
+		// and their values are set. We can ignore other xattrs. In other words,
+		// we only set xattrs defined by resource but never remove.
+
+		if _, ok := resource.(SymLink); ok {
+			lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver)
+			if !ok {
+				return fmt.Errorf("unsupported symlink xattr for resource %q", resource.Path())
+			}
+			if err := lxattrDriver.LSetxattr(fp, xattrer.XAttrs()); err != nil {
+				return err
+			}
+		} else {
+			xattrDriver, ok := c.driver.(driverpkg.XAttrDriver)
+			if !ok {
+				return fmt.Errorf("unsupported xattr for resource %q", resource.Path())
+			}
+			if err := xattrDriver.Setxattr(fp, xattrer.XAttrs()); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// Walk provides a convenience function to call filepath.Walk correctly for
+// the context. Otherwise identical to filepath.Walk, the path argument is
+// corrected to be contained within the context.
+func (c *context) Walk(fn filepath.WalkFunc) error {
+	root := c.root
+	fi, err := c.driver.Lstat(c.root)
+	if err == nil && fi.Mode()&os.ModeSymlink != 0 {
+		root, err = c.driver.Readlink(c.root)
+		if err != nil {
+			return err
+		}
+	}
+	return c.pathDriver.Walk(root, func(p string, fi os.FileInfo, err error) error {
+		contained, err := c.containWithRoot(p, root)
+		return fn(contained, fi, err)
+	})
+}
+
+// fullpath returns the system path for the resource, joined with the context
+// root. The path p must be a part of the context.
+func (c *context) fullpath(p string) (string, error) {
+	p = c.pathDriver.Join(c.root, p)
+	if !strings.HasPrefix(p, c.root) {
+		return "", fmt.Errorf("invalid context path")
+	}
+
+	return p, nil
+}
+
+// contain cleans and sanitizes the filesystem path p to be an absolute path,
+// effectively relative to the context root.
+func (c *context) contain(p string) (string, error) {
+	return c.containWithRoot(p, c.root)
+}
+
+// containWithRoot cleans and sanitizes the filesystem path p to be an absolute path,
+// effectively relative to the passed root. Extra care should be used when calling this
+// instead of contain. This is needed for Walk because, if the context root is a symlink,
+// it must be evaluated prior to the Walk
+func (c *context) containWithRoot(p string, root string) (string, error) {
+	sanitized, err := c.pathDriver.Rel(root, p)
+	if err != nil {
+		return "", err
+	}
+
+	// ZOMBIES(stevvooe): In certain cases, we may want to remap these to a
+	// "containment error", so the caller can decide what to do.
+	return c.pathDriver.Join("/", c.pathDriver.Clean(sanitized)), nil
+}
+
+// digest returns the digest of the file at path p, relative to the root.
+func (c *context) digest(p string) (digest.Digest, error) {
+	f, err := c.driver.Open(c.pathDriver.Join(c.root, p))
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	return c.digester.Digest(f)
+}
+
+// resolveXAttrs attempts to resolve the extended attributes for the resource
+// at the path fp, which is the full path to the resource. If the resource
+// cannot have xattrs, nil will be returned.
+func (c *context) resolveXAttrs(fp string, fi os.FileInfo, base *resource) (map[string][]byte, error) {
+	if fi.Mode().IsRegular() || fi.Mode().IsDir() {
+		xattrDriver, ok := c.driver.(driverpkg.XAttrDriver)
+		if !ok {
+			log.Println("xattr extraction not supported")
+			return nil, ErrNotSupported
+		}
+
+		return xattrDriver.Getxattr(fp)
+	}
+
+	if fi.Mode()&os.ModeSymlink != 0 {
+		lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver)
+		if !ok {
+			log.Println("xattr extraction for symlinks not supported")
+			return nil, ErrNotSupported
+		}
+
+		return lxattrDriver.LGetxattr(fp)
+	}
+
+	return nil, nil
+}

+ 88 - 0
vendor/github.com/containerd/continuity/digests.go

@@ -0,0 +1,88 @@
+package continuity
+
+import (
+	"fmt"
+	"io"
+	"sort"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// Digester produces a digest for a given read stream
+type Digester interface {
+	Digest(io.Reader) (digest.Digest, error)
+}
+
+// ContentProvider produces a read stream for a given digest
+type ContentProvider interface {
+	Reader(digest.Digest) (io.ReadCloser, error)
+}
+
+type simpleDigester struct {
+	algorithm digest.Algorithm
+}
+
+func (sd simpleDigester) Digest(r io.Reader) (digest.Digest, error) {
+	digester := sd.algorithm.Digester()
+
+	if _, err := io.Copy(digester.Hash(), r); err != nil {
+		return "", err
+	}
+
+	return digester.Digest(), nil
+}
+
+// uniqifyDigests sorts and uniqifies the provided digests, ensuring that the
+// digests are not repeated and no two digests with the same algorithm have
+// different values. Because a stable sort is used, this has the effect of
+// "zipping" digest collections from multiple resources.
+func uniqifyDigests(digests ...digest.Digest) ([]digest.Digest, error) {
+	sort.Stable(digestSlice(digests)) // stable sort is important for the behavior here.
+	seen := map[digest.Digest]struct{}{}
+	algs := map[digest.Algorithm][]digest.Digest{} // detect different digests.
+
+	var out []digest.Digest
+	// uniqify the digests
+	for _, d := range digests {
+		if _, ok := seen[d]; ok {
+			continue
+		}
+
+		seen[d] = struct{}{}
+		algs[d.Algorithm()] = append(algs[d.Algorithm()], d)
+
+		if len(algs[d.Algorithm()]) > 1 {
+			return nil, fmt.Errorf("conflicting digests for %v found", d.Algorithm())
+		}
+
+		out = append(out, d)
+	}
+
+	return out, nil
+}
+
+// digestsMatch compares the two sets of digests to see if they match.
+func digestsMatch(as, bs []digest.Digest) bool {
+	all := append(as, bs...)
+
+	uniqified, err := uniqifyDigests(all...)
+	if err != nil {
+		// the only error uniqifyDigests returns is when the digests disagree.
+		return false
+	}
+
+	disjoint := len(as) + len(bs)
+	if len(uniqified) == disjoint {
+		// if these two sets have the same cardinality, we know both sides
+		// if the uniqified union is as large as both inputs combined, we know
+		// the two sides didn't share any digests.
+	}
+
+	return true
+}
+
+type digestSlice []digest.Digest
+
+func (p digestSlice) Len() int           { return len(p) }
+func (p digestSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p digestSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
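
A worked example of the cardinality check in digestsMatch, using abbreviated (not valid) digest strings and assuming the go-digest import above: the union uniqifies to two entries while the disjoint count is three, so the sets overlap and therefore match.

as := []digest.Digest{"sha256:aaaa"}
bs := []digest.Digest{"sha256:aaaa", "sha512:bbbb"}
// len(uniqified) == 2 but disjoint == 3, so digestsMatch(as, bs) == true;
// two conflicting sha256 values would instead fail uniqifyDigests and
// the sets would not match.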

+ 0 - 13
vendor/github.com/containerd/continuity/driver/driver_unix.go

@@ -6,7 +6,6 @@ import (
 	"errors"
 	"fmt"
 	"os"
-	"path/filepath"
 	"sort"
 
 	"github.com/containerd/continuity/devices"
@@ -26,18 +25,6 @@ func (d *driver) Mkfifo(path string, mode os.FileMode) error {
 	return devices.Mknod(path, mode, 0, 0)
 }
 
-// Lchmod changes the mode of an file not following symlinks.
-func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
-	if !filepath.IsAbs(path) {
-		path, err = filepath.Abs(path)
-		if err != nil {
-			return
-		}
-	}
-
-	return sysx.Fchmodat(0, path, uint32(mode), sysx.AtSymlinkNofollow)
-}
-
 // Getxattr returns all of the extended attributes for the file at path p.
 func (d *driver) Getxattr(p string) (map[string][]byte, error) {
 	xattrs, err := sysx.Listxattr(p)

+ 19 - 0
vendor/github.com/containerd/continuity/driver/lchmod_linux.go

@@ -0,0 +1,19 @@
+package driver
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// Lchmod changes the mode of a file not following symlinks.
+func (d *driver) Lchmod(path string, mode os.FileMode) error {
+	// On Linux, file mode is not supported for symlinks,
+	// and fchmodat() does not support AT_SYMLINK_NOFOLLOW,
+	// so symlinks need to be skipped entirely.
+	if st, err := os.Stat(path); err == nil && st.Mode()&os.ModeSymlink != 0 {
+		return nil
+	}
+
+	return unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), 0)
+}

+ 14 - 0
vendor/github.com/containerd/continuity/driver/lchmod_unix.go

@@ -0,0 +1,14 @@
+// +build darwin freebsd solaris
+
+package driver
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// Lchmod changes the mode of a file not following symlinks.
+func (d *driver) Lchmod(path string, mode os.FileMode) error {
+	return unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
+}

+ 2 - 2
vendor/github.com/containerd/continuity/fs/du.go

@@ -10,8 +10,8 @@ type Usage struct {
 
 // DiskUsage counts the number of inodes and disk usage for the resources under
 // path.
-func DiskUsage(roots ...string) (Usage, error) {
-	return diskUsage(roots...)
+func DiskUsage(ctx context.Context, roots ...string) (Usage, error) {
+	return diskUsage(ctx, roots...)
 }
 
 // DiffUsage counts the numbers of inodes and disk usage in the

+ 7 - 1
vendor/github.com/containerd/continuity/fs/du_unix.go

@@ -24,7 +24,7 @@ func newInode(stat *syscall.Stat_t) inode {
 	}
 }
 
-func diskUsage(roots ...string) (Usage, error) {
+func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
 
 	var (
 		size   int64
@@ -37,6 +37,12 @@ func diskUsage(roots ...string) (Usage, error) {
 				return err
 			}
 
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			default:
+			}
+
 			inoKey := newInode(fi.Sys().(*syscall.Stat_t))
 			if _, ok := inodes[inoKey]; !ok {
 				inodes[inoKey] = struct{}{}

+ 7 - 1
vendor/github.com/containerd/continuity/fs/du_windows.go

@@ -8,7 +8,7 @@ import (
 	"path/filepath"
 )
 
-func diskUsage(roots ...string) (Usage, error) {
+func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
 	var (
 		size int64
 	)
@@ -21,6 +21,12 @@ func diskUsage(roots ...string) (Usage, error) {
 				return err
 			}
 
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			default:
+			}
+
 			size += fi.Size()
 			return nil
 		}); err != nil {
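
Both walkers now poll ctx between files, so a long scan can be cancelled or bounded. A hypothetical caller of the exported entry point, assuming the usual context, fmt, log and time imports, the github.com/containerd/continuity/fs package, and that Usage carries Inodes and Size fields:

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
usage, err := fs.DiskUsage(ctx, "/var/lib/containerd") // path illustrative
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%d inodes, %d bytes\n", usage.Inodes, usage.Size)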

+ 113 - 0
vendor/github.com/containerd/continuity/groups_unix.go

@@ -0,0 +1,113 @@
+package continuity
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// TODO(stevvooe): This needs a lot of work before we can call it useful.
+
+type groupIndex struct {
+	byName map[string]*group
+	byGID  map[int]*group
+}
+
+func getGroupIndex() (*groupIndex, error) {
+	f, err := os.Open("/etc/group")
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	groups, err := parseGroups(f)
+	if err != nil {
+		return nil, err
+	}
+
+	return newGroupIndex(groups), nil
+}
+
+func newGroupIndex(groups []group) *groupIndex {
+	gi := &groupIndex{
+		byName: make(map[string]*group),
+		byGID:  make(map[int]*group),
+	}
+
+	for i, group := range groups {
+		gi.byGID[group.gid] = &groups[i]
+		gi.byName[group.name] = &groups[i]
+	}
+
+	return gi
+}
+
+type group struct {
+	name    string
+	gid     int
+	members []string
+}
+
+func getGroupName(gid int) (string, error) {
+	f, err := os.Open("/etc/group")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	groups, err := parseGroups(f)
+	if err != nil {
+		return "", err
+	}
+
+	for _, group := range groups {
+		if group.gid == gid {
+			return group.name, nil
+		}
+	}
+
+	return "", fmt.Errorf("no group for gid")
+}
+
+// parseGroups parses an /etc/group file for group names, ids and membership.
+// This is unix specific.
+func parseGroups(rd io.Reader) ([]group, error) {
+	var groups []group
+	scanner := bufio.NewScanner(rd)
+
+	for scanner.Scan() {
+		if strings.HasPrefix(scanner.Text(), "#") {
+			continue // skip comment
+		}
+
+		parts := strings.SplitN(scanner.Text(), ":", 4)
+
+		if len(parts) != 4 {
+			return nil, fmt.Errorf("bad entry: %q", scanner.Text())
+		}
+
+		name, _, sgid, smembers := parts[0], parts[1], parts[2], parts[3]
+
+		gid, err := strconv.Atoi(sgid)
+		if err != nil {
+			return nil, fmt.Errorf("bad gid: %q", sgid)
+		}
+
+		members := strings.Split(smembers, ",")
+
+		groups = append(groups, group{
+			name:    name,
+			gid:     gid,
+			members: members,
+		})
+	}
+
+	if scanner.Err() != nil {
+		return nil, scanner.Err()
+	}
+
+	return groups, nil
+}

+ 57 - 0
vendor/github.com/containerd/continuity/hardlinks.go

@@ -0,0 +1,57 @@
+package continuity
+
+import (
+	"fmt"
+	"os"
+)
+
+var (
+	errNotAHardLink = fmt.Errorf("invalid hardlink")
+)
+
+type hardlinkManager struct {
+	hardlinks map[hardlinkKey][]Resource
+}
+
+func newHardlinkManager() *hardlinkManager {
+	return &hardlinkManager{
+		hardlinks: map[hardlinkKey][]Resource{},
+	}
+}
+
+// Add attempts to add the resource to the hardlink manager. If the resource
+// cannot be considered as a hardlink candidate, errNotAHardLink is returned.
+func (hlm *hardlinkManager) Add(fi os.FileInfo, resource Resource) error {
+	if _, ok := resource.(Hardlinkable); !ok {
+		return errNotAHardLink
+	}
+
+	key, err := newHardlinkKey(fi)
+	if err != nil {
+		return err
+	}
+
+	hlm.hardlinks[key] = append(hlm.hardlinks[key], resource)
+
+	return nil
+}
+
+// Merge processes the current state of the hardlink manager and merges any
+// shared nodes into hardlinked resources.
+func (hlm *hardlinkManager) Merge() ([]Resource, error) {
+	var resources []Resource
+	for key, linked := range hlm.hardlinks {
+		if len(linked) < 1 {
+			return nil, fmt.Errorf("no hardlink entries for dev, inode pair: %#v", key)
+		}
+
+		merged, err := Merge(linked...)
+		if err != nil {
+			return nil, fmt.Errorf("error merging hardlink: %v", err)
+		}
+
+		resources = append(resources, merged)
+	}
+
+	return resources, nil
+}

+ 36 - 0
vendor/github.com/containerd/continuity/hardlinks_unix.go

@@ -0,0 +1,36 @@
+// +build linux darwin freebsd solaris
+
+package continuity
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+)
+
+// hardlinkKey provides a tuple-key for managing hardlinks. This is system-
+// specific.
+type hardlinkKey struct {
+	dev   uint64
+	inode uint64
+}
+
+// newHardlinkKey returns a hardlink key for the provided file info. If the
+// resource does not represent a possible hardlink, errNotAHardLink will be
+// returned.
+func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) {
+	sys, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return hardlinkKey{}, fmt.Errorf("cannot resolve (*syscall.Stat_t) from os.FileInfo")
+	}
+
+	if sys.Nlink < 2 {
+		// NOTE(stevvooe): This is not always true for all filesystems. We
+		// should somehow detect this and provide a slow "polyfill" that
+		// leverages os.SameFile if we detect a filesystem where link counts
+		// are not really supported.
+		return hardlinkKey{}, errNotAHardLink
+	}
+
+	return hardlinkKey{dev: uint64(sys.Dev), inode: uint64(sys.Ino)}, nil
+}

+ 12 - 0
vendor/github.com/containerd/continuity/hardlinks_windows.go

@@ -0,0 +1,12 @@
+package continuity
+
+import "os"
+
+type hardlinkKey struct{}
+
+func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) {
+	// NOTE(stevvooe): Obviously, this is not yet implemented. However, the
+	// makings of an implementation are available in src/os/types_windows.go. More
+	// investigation needs to be done to figure out exactly how to do this.
+	return hardlinkKey{}, errNotAHardLink
+}

+ 47 - 0
vendor/github.com/containerd/continuity/ioutils.go

@@ -0,0 +1,47 @@
+package continuity
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// AtomicWriteFile atomically writes data to a file by first writing to a
+// temp file and calling rename.
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+	buf := bytes.NewBuffer(data)
+	return atomicWriteFile(filename, buf, int64(len(data)), perm)
+}
+
+// atomicWriteFile writes data to a file by first writing to a temp
+// file and calling rename.
+func atomicWriteFile(filename string, r io.Reader, dataSize int64, perm os.FileMode) error {
+	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+	if err != nil {
+		return err
+	}
+	err = os.Chmod(f.Name(), perm)
+	if err != nil {
+		f.Close()
+		return err
+	}
+	n, err := io.Copy(f, r)
+	if err == nil && n < dataSize {
+		f.Close()
+		return io.ErrShortWrite
+	}
+	if err != nil {
+		f.Close()
+		return err
+	}
+	if err := f.Sync(); err != nil {
+		f.Close()
+		return err
+	}
+	if err := f.Close(); err != nil {
+		return err
+	}
+	return os.Rename(f.Name(), filename)
+}
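
A hypothetical use of the exported helper; because the temp file is synced and then renamed within the same directory, readers never observe a half-written file:

// Path and data are illustrative.
if err := continuity.AtomicWriteFile("/etc/app/config.json", data, 0644); err != nil {
	log.Fatal(err)
}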

+ 144 - 0
vendor/github.com/containerd/continuity/manifest.go

@@ -0,0 +1,144 @@
+package continuity
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"sort"
+
+	pb "github.com/containerd/continuity/proto"
+	"github.com/golang/protobuf/proto"
+)
+
+// Manifest provides the contents of a manifest. Users of this struct should
+// not typically modify any fields directly.
+type Manifest struct {
+	// Resources specifies all the resources for a manifest in order by path.
+	Resources []Resource
+}
+
+func Unmarshal(p []byte) (*Manifest, error) {
+	var bm pb.Manifest
+
+	if err := proto.Unmarshal(p, &bm); err != nil {
+		return nil, err
+	}
+
+	var m Manifest
+	for _, b := range bm.Resource {
+		r, err := fromProto(b)
+		if err != nil {
+			return nil, err
+		}
+
+		m.Resources = append(m.Resources, r)
+	}
+
+	return &m, nil
+}
+
+func Marshal(m *Manifest) ([]byte, error) {
+	var bm pb.Manifest
+	for _, resource := range m.Resources {
+		bm.Resource = append(bm.Resource, toProto(resource))
+	}
+
+	return proto.Marshal(&bm)
+}
+
+func MarshalText(w io.Writer, m *Manifest) error {
+	var bm pb.Manifest
+	for _, resource := range m.Resources {
+		bm.Resource = append(bm.Resource, toProto(resource))
+	}
+
+	return proto.MarshalText(w, &bm)
+}
+
+// BuildManifest creates the manifest for the given context
+func BuildManifest(ctx Context) (*Manifest, error) {
+	resourcesByPath := map[string]Resource{}
+	hardlinks := newHardlinkManager()
+
+	if err := ctx.Walk(func(p string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return fmt.Errorf("error walking %s: %v", p, err)
+		}
+
+		if p == string(os.PathSeparator) {
+			// skip root
+			return nil
+		}
+
+		resource, err := ctx.Resource(p, fi)
+		if err != nil {
+			if err == ErrNotFound {
+				return nil
+			}
+			log.Printf("error getting resource %q: %v", p, err)
+			return err
+		}
+
+		// add to the hardlink manager
+		if err := hardlinks.Add(fi, resource); err == nil {
+			// Resource has been accepted by hardlink manager so we don't add
+			// it to the resourcesByPath until we merge at the end.
+			return nil
+		} else if err != errNotAHardLink {
+			// handle any other case where we have a proper error.
+			return fmt.Errorf("adding hardlink %s: %v", p, err)
+		}
+
+		resourcesByPath[p] = resource
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	// merge and post-process the hardlinks.
+	hardlinked, err := hardlinks.Merge()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, resource := range hardlinked {
+		resourcesByPath[resource.Path()] = resource
+	}
+
+	var resources []Resource
+	for _, resource := range resourcesByPath {
+		resources = append(resources, resource)
+	}
+
+	sort.Stable(ByPath(resources))
+
+	return &Manifest{
+		Resources: resources,
+	}, nil
+}
+
+// VerifyManifest verifies all the resources in a manifest
+// against files from the given context.
+func VerifyManifest(ctx Context, manifest *Manifest) error {
+	for _, resource := range manifest.Resources {
+		if err := ctx.Verify(resource); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ApplyManifest applies the resources in a manifest to
+// the given context.
+func ApplyManifest(ctx Context, manifest *Manifest) error {
+	for _, resource := range manifest.Resources {
+		if err := ctx.Apply(resource); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
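
Tying the new continuity surface together, a hypothetical round trip: build a manifest for a tree, serialize it, and later verify the same tree against it (root path illustrative):

fsCtx, err := continuity.NewContext("/var/lib/app/rootfs")
if err != nil {
	log.Fatal(err)
}
m, err := continuity.BuildManifest(fsCtx)
if err != nil {
	log.Fatal(err)
}
p, err := continuity.Marshal(m) // protobuf bytes, storable anywhere
if err != nil {
	log.Fatal(err)
}
m2, err := continuity.Unmarshal(p)
if err != nil {
	log.Fatal(err)
}
if err := continuity.VerifyManifest(fsCtx, m2); err != nil {
	log.Fatal(err) // some resource changed since the manifest was built
}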

+ 3 - 0
vendor/github.com/containerd/continuity/proto/gen.go

@@ -0,0 +1,3 @@
+package proto
+
+//go:generate protoc --go_out=. manifest.proto

Some files were not shown because too many files changed in this diff