package layer // import "github.com/docker/docker/layer"

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"

	"github.com/containerd/log"
	"github.com/docker/distribution"
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/moby/locker"
	"github.com/opencontainers/go-digest"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// maxLayerDepth represents the maximum number of
// layers which can be chained together. 125 was
// chosen to account for the 127 max in some
// graphdrivers plus the 2 additional layers
// used to create a rwlayer.
const maxLayerDepth = 125

type layerStore struct {
	store       *fileMetadataStore
	driver      graphdriver.Driver
	useTarSplit bool

	layerMap map[ChainID]*roLayer
	layerL   sync.Mutex

	mounts map[string]*mountedLayer
	mountL sync.Mutex

	// protect *RWLayer() methods from operating on the same name/id
	locker *locker.Locker
}

// StoreOptions are the options used to create a new Store instance
type StoreOptions struct {
	Root                      string
	MetadataStorePathTemplate string
	GraphDriver               string
	GraphDriverOptions        []string
	IDMapping                 idtools.IdentityMapping
	PluginGetter              plugingetter.PluginGetter
	ExperimentalEnabled       bool
}

// NewStoreFromOptions creates a new Store instance
func NewStoreFromOptions(options StoreOptions) (Store, error) {
	driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{
		Root:                options.Root,
		DriverOptions:       options.GraphDriverOptions,
		IDMap:               options.IDMapping,
		ExperimentalEnabled: options.ExperimentalEnabled,
	})
	if err != nil {
		if options.GraphDriver != "" {
			return nil, fmt.Errorf("error initializing graphdriver: %v: %s", err, options.GraphDriver)
		}
		return nil, fmt.Errorf("error initializing graphdriver: %v", err)
	}
	log.G(context.TODO()).Debugf("Initialized graph driver %s", driver)

	root := fmt.Sprintf(options.MetadataStorePathTemplate, driver)

	return newStoreFromGraphDriver(root, driver)
}

// newStoreFromGraphDriver creates a new Store instance using the provided
// metadata store and graph driver. The metadata store will be used to restore
// the Store.
func newStoreFromGraphDriver(root string, driver graphdriver.Driver) (Store, error) {
	caps := graphdriver.Capabilities{}
	if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok {
		caps = capDriver.Capabilities()
	}

	ms, err := newFSMetadataStore(root)
	if err != nil {
		return nil, err
	}

	ls := &layerStore{
		store:       ms,
		driver:      driver,
		layerMap:    map[ChainID]*roLayer{},
		mounts:      map[string]*mountedLayer{},
		locker:      locker.New(),
		useTarSplit: !caps.ReproducesExactDiffs,
	}

	ids, mounts, err := ms.List()
	if err != nil {
		return nil, err
	}

	for _, id := range ids {
		l, err := ls.loadLayer(id)
		if err != nil {
			log.G(context.TODO()).Debugf("Failed to load layer %s: %s", id, err)
			continue
		}
		if l.parent != nil {
			l.parent.referenceCount++
		}
	}

	for _, mount := range mounts {
		if err := ls.loadMount(mount); err != nil {
			log.G(context.TODO()).Debugf("Failed to load mount %s: %s", mount, err)
		}
	}

	return ls, nil
}

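// Driver returns the graph driver backing this layer store.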
func (ls *layerStore) Driver() graphdriver.Driver {
	return ls.driver
}

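// loadLayer reads a read-only layer's metadata from the metadata store and
// caches it in the in-memory layer map, loading parent layers recursively.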
func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
	cl, ok := ls.layerMap[layer]
	if ok {
		return cl, nil
	}

	diff, err := ls.store.GetDiffID(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err)
	}

	size, err := ls.store.GetSize(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get size for %s: %s", layer, err)
	}

	cacheID, err := ls.store.GetCacheID(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err)
	}

	parent, err := ls.store.GetParent(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
	}

	descriptor, err := ls.store.GetDescriptor(layer)
	if err != nil {
		return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err)
	}

	cl = &roLayer{
		chainID:    layer,
		diffID:     diff,
		size:       size,
		cacheID:    cacheID,
		layerStore: ls,
		references: map[Layer]struct{}{},
		descriptor: descriptor,
	}

	if parent != "" {
		p, err := ls.loadLayer(parent)
		if err != nil {
			return nil, err
		}
		cl.parent = p
	}

	ls.layerMap[cl.chainID] = cl

	return cl, nil
}

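// loadMount restores a mounted (read-write) layer from the metadata store and
// registers it in the mount map, taking a reference on its parent layer.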
func (ls *layerStore) loadMount(mount string) error {
	ls.mountL.Lock()
	defer ls.mountL.Unlock()
	if _, ok := ls.mounts[mount]; ok {
		return nil
	}

	mountID, err := ls.store.GetMountID(mount)
	if err != nil {
		return err
	}

	initID, err := ls.store.GetInitID(mount)
	if err != nil {
		return err
	}

	parent, err := ls.store.GetMountParent(mount)
	if err != nil {
		return err
	}

	ml := &mountedLayer{
		name:       mount,
		mountID:    mountID,
		initID:     initID,
		layerStore: ls,
		references: map[RWLayer]*referencedRWLayer{},
	}

	if parent != "" {
		p, err := ls.loadLayer(parent)
		if err != nil {
			return err
		}
		ml.parent = p

		p.referenceCount++
	}

	ls.mounts[ml.name] = ml

	return nil
}

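// applyTar extracts the tar stream onto the layer's filesystem via the graph
// driver while computing the layer's DiffID, and records tar-split metadata in
// the transaction when the driver cannot reproduce exact diffs itself.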
func (ls *layerStore) applyTar(tx *fileMetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
	digester := digest.Canonical.Digester()
	tr := io.TeeReader(ts, digester.Hash())

	rdr := tr
	if ls.useTarSplit {
		tsw, err := tx.TarSplitWriter(true)
		if err != nil {
			return err
		}
		metaPacker := storage.NewJSONPacker(tsw)
		defer tsw.Close()

		// we're passing nil here for the file putter, because the ApplyDiff will
		// handle the extraction of the archive
		rdr, err = asm.NewInputTarStream(tr, metaPacker, nil)
		if err != nil {
			return err
		}
	}

	applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr)
	// discard trailing data but ensure metadata is picked up to reconstruct stream
	// unconditionally call io.Copy here before checking err to ensure the resources
	// allocated by NewInputTarStream above are always released
	io.Copy(io.Discard, rdr) // ignore error as reader may be closed
	if err != nil {
		return err
	}

	layer.size = applySize
	layer.diffID = DiffID(digester.Digest())

	log.G(context.TODO()).Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)

	return nil
}

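// Register registers the layer described by the given tar stream as a child
// of parent, using an empty distribution descriptor.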
func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
	return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{})
}

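// registerWithDescriptor applies the tar stream on top of parent, stores the
// resulting layer metadata transactionally, and returns a referenced layer.
// If an identical layer already exists, the newly created one is cleaned up
// and a reference to the existing layer is returned instead.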
func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
	// cErr is used to hold the error which will always trigger
	// cleanup of created sources but may not be an error returned
	// to the caller (already exists).
	var cErr error
	var pid string
	var p *roLayer

	if string(parent) != "" {
		ls.layerL.Lock()
		p = ls.get(parent)
		ls.layerL.Unlock()
		if p == nil {
			return nil, ErrLayerDoesNotExist
		}
		pid = p.cacheID
		// Release parent chain if error
		defer func() {
			if cErr != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
		if p.depth() >= maxLayerDepth {
			cErr = ErrMaxDepthExceeded
			return nil, cErr
		}
	}

	// Create new roLayer
	layer := &roLayer{
		parent:         p,
		cacheID:        stringid.GenerateRandomID(),
		referenceCount: 1,
		layerStore:     ls,
		references:     map[Layer]struct{}{},
		descriptor:     descriptor,
	}

	if cErr = ls.driver.Create(layer.cacheID, pid, nil); cErr != nil {
		return nil, cErr
	}

	tx, cErr := ls.store.StartTransaction()
	if cErr != nil {
		return nil, cErr
	}

	defer func() {
		if cErr != nil {
			log.G(context.TODO()).Debugf("Cleaning up layer %s: %v", layer.cacheID, cErr)
			if err := ls.driver.Remove(layer.cacheID); err != nil {
				log.G(context.TODO()).Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err)
			}
			if err := tx.Cancel(); err != nil {
				log.G(context.TODO()).Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
			}
		}
	}()

	if cErr = ls.applyTar(tx, ts, pid, layer); cErr != nil {
		return nil, cErr
	}

	if layer.parent == nil {
		layer.chainID = ChainID(layer.diffID)
	} else {
		layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID)
	}

	if cErr = storeLayer(tx, layer); cErr != nil {
		return nil, cErr
	}

	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	if existingLayer := ls.get(layer.chainID); existingLayer != nil {
		// Set error for cleanup, but do not return the error
		cErr = errors.New("layer already exists")
		return existingLayer.getReference(), nil
	}

	if cErr = tx.Commit(layer.chainID); cErr != nil {
		return nil, cErr
	}

	ls.layerMap[layer.chainID] = layer

	return layer.getReference(), nil
}

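// get returns the layer for the given chain ID from the in-memory map and
// increments its reference count; it expects the caller to hold layerL.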
func (ls *layerStore) get(layer ChainID) *roLayer {
	l, ok := ls.layerMap[layer]
	if !ok {
		return nil
	}
	l.referenceCount++
	return l
}

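// Get returns a reference to the layer with the given chain ID, or
// ErrLayerDoesNotExist if no such layer is known to the store.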
func (ls *layerStore) Get(l ChainID) (Layer, error) {
	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	layer := ls.get(l)
	if layer == nil {
		return nil, ErrLayerDoesNotExist
	}

	return layer.getReference(), nil
}

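// Map returns a snapshot of all read-only layers currently known to the
// store, keyed by chain ID.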
func (ls *layerStore) Map() map[ChainID]Layer {
	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	layers := map[ChainID]Layer{}

	for k, v := range ls.layerMap {
		layers[k] = v
	}

	return layers
}

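// deleteLayer removes the layer's data from the graph driver and its metadata
// from disk, filling in the returned Metadata for the caller.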
func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
	// Rename layer digest folder first so we detect orphan layer(s)
	// if ls.driver.Remove fails
	var dir string
	for {
		dgst := digest.Digest(layer.chainID)
		tmpID := fmt.Sprintf("%s-%s-removing", dgst.Encoded(), stringid.GenerateRandomID())
		dir = filepath.Join(ls.store.root, string(dgst.Algorithm()), tmpID)
		err := os.Rename(ls.store.getLayerDirectory(layer.chainID), dir)
		if os.IsExist(err) {
			continue
		}
		break
	}
	err := ls.driver.Remove(layer.cacheID)
	if err != nil {
		return err
	}
	err = os.RemoveAll(dir)
	if err != nil {
		return err
	}
	metadata.DiffID = layer.diffID
	metadata.ChainID = layer.chainID
	metadata.Size = layer.Size()
	if err != nil {
		return err
	}
	metadata.DiffSize = layer.size

	return nil
}

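// releaseLayer decrements the layer's reference count and, once it drops to
// zero, deletes the layer and walks up the parent chain doing the same,
// returning metadata for every layer that was removed.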
func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) {
	depth := 0
	removed := []Metadata{}
	for {
		if l.referenceCount == 0 {
			panic("layer not retained")
		}
		l.referenceCount--
		if l.referenceCount != 0 {
			return removed, nil
		}

		if len(removed) == 0 && depth > 0 {
			panic("cannot remove layer with child")
		}
		if l.hasReferences() {
			panic("cannot delete referenced layer")
		}
		// Remove layer from layer map first so it is not considered to exist
		// when ls.deleteLayer fails.
		delete(ls.layerMap, l.chainID)

		var metadata Metadata
		if err := ls.deleteLayer(l, &metadata); err != nil {
			return nil, err
		}
		removed = append(removed, metadata)

		if l.parent == nil {
			return removed, nil
		}

		depth++
		l = l.parent
	}
}

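// Release drops the given reference to a read-only layer and releases the
// underlying layer chain, returning metadata for any layers that were deleted.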
func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
	ls.layerL.Lock()
	defer ls.layerL.Unlock()
	layer, ok := ls.layerMap[l.ChainID()]
	if !ok {
		return []Metadata{}, nil
	}
	if !layer.hasReference(l) {
		return nil, ErrLayerNotRetained
	}

	layer.deleteReference(l)

	return ls.releaseLayer(layer)
}

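// CreateRWLayer creates a new read-write layer with the given name on top of
// parent, optionally running an init function and applying storage options
// before the mount is persisted to the metadata store.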
func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (_ RWLayer, err error) {
	var (
		storageOpt map[string]string
		initFunc   MountInit
		mountLabel string
	)

	if opts != nil {
		mountLabel = opts.MountLabel
		storageOpt = opts.StorageOpt
		initFunc = opts.InitFunc
	}

	ls.locker.Lock(name)
	defer ls.locker.Unlock(name)

	ls.mountL.Lock()
	_, ok := ls.mounts[name]
	ls.mountL.Unlock()
	if ok {
		return nil, ErrMountNameConflict
	}

	var pid string
	var p *roLayer
	if string(parent) != "" {
		ls.layerL.Lock()
		p = ls.get(parent)
		ls.layerL.Unlock()
		if p == nil {
			return nil, ErrLayerDoesNotExist
		}
		pid = p.cacheID

		// Release parent chain if error
		defer func() {
			if err != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
	}

	m := &mountedLayer{
		name:       name,
		parent:     p,
		mountID:    ls.mountID(name),
		layerStore: ls,
		references: map[RWLayer]*referencedRWLayer{},
	}

	if initFunc != nil {
		pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt)
		if err != nil {
			return
		}
		m.initID = pid
	}

	createOpts := &graphdriver.CreateOpts{
		StorageOpt: storageOpt,
	}

	if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil {
		return
	}
	if err = ls.saveMount(m); err != nil {
		return
	}

	return m.getReference(), nil
}

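// GetRWLayer returns a reference to the read-write layer with the given id,
// or ErrMountDoesNotExist if it is not known to the store.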
func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) {
	ls.locker.Lock(id)
	defer ls.locker.Unlock(id)

	ls.mountL.Lock()
	mount := ls.mounts[id]
	ls.mountL.Unlock()
	if mount == nil {
		return nil, ErrMountDoesNotExist
	}

	return mount.getReference(), nil
}

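// GetMountID returns the graph driver mount ID for the read-write layer with
// the given id.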
func (ls *layerStore) GetMountID(id string) (string, error) {
	ls.mountL.Lock()
	mount := ls.mounts[id]
	ls.mountL.Unlock()

	if mount == nil {
		return "", ErrMountDoesNotExist
	}
	log.G(context.TODO()).Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID)

	return mount.mountID, nil
}

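// ReleaseRWLayer drops the given reference to a read-write layer. When no
// references remain, the mount, its init layer, and its metadata are removed
// and the parent chain is released.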
func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
	name := l.Name()
	ls.locker.Lock(name)
	defer ls.locker.Unlock(name)

	ls.mountL.Lock()
	m := ls.mounts[name]
	ls.mountL.Unlock()
	if m == nil {
		return []Metadata{}, nil
	}

	if err := m.deleteReference(l); err != nil {
		return nil, err
	}

	if m.hasReferences() {
		return []Metadata{}, nil
	}

	if err := ls.driver.Remove(m.mountID); err != nil {
		log.G(context.TODO()).Errorf("Error removing mounted layer %s: %s", m.name, err)
		m.retakeReference(l)
		return nil, err
	}

	if m.initID != "" {
		if err := ls.driver.Remove(m.initID); err != nil {
			log.G(context.TODO()).Errorf("Error removing init layer %s: %s", m.name, err)
			m.retakeReference(l)
			return nil, err
		}
	}

	if err := ls.store.RemoveMount(m.name); err != nil {
		log.G(context.TODO()).Errorf("Error removing mount metadata: %s: %s", m.name, err)
		m.retakeReference(l)
		return nil, err
	}

	ls.mountL.Lock()
	delete(ls.mounts, name)
	ls.mountL.Unlock()

	ls.layerL.Lock()
	defer ls.layerL.Unlock()
	if m.parent != nil {
		return ls.releaseLayer(m.parent)
	}

	return []Metadata{}, nil
}

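// saveMount persists the mounted layer's IDs and parent to the metadata store
// and adds it to the in-memory mount map.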
func (ls *layerStore) saveMount(mount *mountedLayer) error {
	if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil {
		return err
	}

	if mount.initID != "" {
		if err := ls.store.SetInitID(mount.name, mount.initID); err != nil {
			return err
		}
	}

	if mount.parent != nil {
		if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil {
			return err
		}
	}

	ls.mountL.Lock()
	ls.mounts[mount.name] = mount
	ls.mountL.Unlock()

	return nil
}

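// initMount creates the "<graph-id>-init" layer on top of parent, runs the
// supplied init function against its mount point, and returns the init
// layer's ID.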
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
	// Use "<graph-id>-init" to maintain compatibility with graph drivers
	// which are expecting this layer with this special name. If all
	// graph drivers can be updated to not rely on knowing about this layer
	// then the initID should be randomly generated.
	initID := fmt.Sprintf("%s-init", graphID)

	createOpts := &graphdriver.CreateOpts{
		MountLabel: mountLabel,
		StorageOpt: storageOpt,
	}

	if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil {
		return "", err
	}
	p, err := ls.driver.Get(initID, "")
	if err != nil {
		return "", err
	}

	if err := initFunc(p); err != nil {
		ls.driver.Put(initID)
		return "", err
	}

	if err := ls.driver.Put(initID); err != nil {
		return "", err
	}

	return initID, nil
}

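// getTarStream returns a tar stream for the layer's diff, either directly
// from the graph driver or by reassembling it from the recorded tar-split
// metadata.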
func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) {
	if !ls.useTarSplit {
		var parentCacheID string
		if rl.parent != nil {
			parentCacheID = rl.parent.cacheID
		}

		return ls.driver.Diff(rl.cacheID, parentCacheID)
	}

	r, err := ls.store.TarSplitReader(rl.chainID)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		err := ls.assembleTarTo(rl.cacheID, r, nil, pw)
		if err != nil {
			pw.CloseWithError(err)
		} else {
			pw.Close()
		}
	}()

	return pr, nil
}

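// assembleTarTo reconstructs the layer's original tar stream from tar-split
// metadata and the files on disk, writing the result to w and optionally
// accumulating the stream size into size.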
func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
	diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver)
	if !ok {
		diffDriver = &naiveDiffPathDriver{ls.driver}
	}

	defer metadata.Close()

	// get our relative path to the container
	fileGetCloser, err := diffDriver.DiffGetter(graphID)
	if err != nil {
		return err
	}
	defer fileGetCloser.Close()

	metaUnpacker := storage.NewJSONUnpacker(metadata)
	upackerCounter := &unpackSizeCounter{metaUnpacker, size}
	log.G(context.TODO()).Debugf("Assembling tar data for %s", graphID)
	return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w)
}

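// Cleanup removes any orphaned layers left behind by interrupted deletions
// and then lets the graph driver perform its own cleanup.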
func (ls *layerStore) Cleanup() error {
	orphanLayers, err := ls.store.getOrphan()
	if err != nil {
		log.G(context.TODO()).WithError(err).Error("cannot get orphan layers")
	}
	if len(orphanLayers) > 0 {
		log.G(context.TODO()).Debugf("found %v orphan layers", len(orphanLayers))
	}
	for _, orphan := range orphanLayers {
		log.G(context.TODO()).WithField("cache-id", orphan.cacheID).Debugf("removing orphan layer, chain ID: %v", orphan.chainID)
		err = ls.driver.Remove(orphan.cacheID)
		if err != nil && !os.IsNotExist(err) {
			log.G(context.TODO()).WithError(err).WithField("cache-id", orphan.cacheID).Error("cannot remove orphan layer")
			continue
		}
		err = ls.store.Remove(orphan.chainID, orphan.cacheID)
		if err != nil {
			log.G(context.TODO()).WithError(err).WithField("chain-id", orphan.chainID).Error("cannot remove orphan layer metadata")
		}
	}
	return ls.driver.Cleanup()
}

func (ls *layerStore) DriverStatus() [][2]string {
	return ls.driver.Status()
}

func (ls *layerStore) DriverName() string {
	return ls.driver.String()
}

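// naiveDiffPathDriver wraps a graph driver that has no native DiffGetter
// support so that diffs can still be assembled from the mounted filesystem.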
type naiveDiffPathDriver struct {
	graphdriver.Driver
}

type fileGetPutter struct {
	storage.FileGetter
	driver graphdriver.Driver
	id     string
}

func (w *fileGetPutter) Close() error {
	return w.driver.Put(w.id)
}

func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
	p, err := n.Driver.Get(id, "")
	if err != nil {
		return nil, err
	}
	return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
}