package layer // import "github.com/docker/docker/layer"

import (
	"compress/gzip"
	"context"
	"errors"
	"io"
	"os"

	"github.com/containerd/containerd/log"
	"github.com/opencontainers/go-digest"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)
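
// ChecksumForGraphID computes the DiffID and size for the layer stored under
// the given graph driver ID. It prefers the tar-split data at oldTarDataPath
// and, if that is missing or unreadable, falls back to regenerating the
// checksum from the graph driver diff, writing fresh tar-split data to
// newTarDataPath.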
func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
	// If the tar-split data cannot be used for any reason, fall back to
	// regenerating the checksum directly from the graph driver diff.
	defer func() {
		if err != nil {
			diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath)
		}
	}()

	if oldTarDataPath == "" {
		err = errors.New("no tar-split file")
		return
	}

	tarDataFile, err := os.Open(oldTarDataPath)
	if err != nil {
		return
	}
	defer tarDataFile.Close()
	uncompressed, err := gzip.NewReader(tarDataFile)
	if err != nil {
		return
	}

	dgst := digest.Canonical.Digester()
	err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash())
	if err != nil {
		return
	}

	diffID = DiffID(dgst.Digest())
	err = os.RemoveAll(newTarDataPath)
	if err != nil {
		return
	}
	err = os.Link(oldTarDataPath, newTarDataPath)

	return
}
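
// checksumForGraphIDNoTarsplit streams the layer diff from the graph driver,
// writing gzip-compressed tar-split metadata to newTarDataPath while computing
// the layer's DiffID and size.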
func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
	rawarchive, err := ls.driver.Diff(id, parent)
	if err != nil {
		return
	}
	defer rawarchive.Close()

	f, err := os.Create(newTarDataPath)
	if err != nil {
		return
	}
	defer f.Close()
	mfz := gzip.NewWriter(f)
	defer mfz.Close()
	metaPacker := storage.NewJSONPacker(mfz)

	// Wrap the packer so the layer size is accumulated as the tar-split
	// metadata is written.
	packerCounter := &packSizeCounter{metaPacker, &size}

	archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
	if err != nil {
		return
	}
	dgst, err := digest.FromReader(archive)
	if err != nil {
		return
	}
	diffID = DiffID(dgst)
	return
}
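
// RegisterByGraphID registers a layer that already exists in the graph driver
// under graphID with the layer store, recording its parent chain, DiffID,
// size, and tar-split data (read from tarDataFile), and returns a reference
// to the registered layer. If a layer with the same chain ID already exists,
// a reference to the existing layer is returned instead.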
func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) {
	// err is used to hold the error which will always trigger
	// cleanup of created sources but may not be an error returned
	// to the caller (already exists).
	var err error
	var p *roLayer
	if string(parent) != "" {
		ls.layerL.Lock()
		p = ls.get(parent)
		ls.layerL.Unlock()
		if p == nil {
			return nil, ErrLayerDoesNotExist
		}

		// Release parent chain if error
		defer func() {
			if err != nil {
				ls.layerL.Lock()
				ls.releaseLayer(p)
				ls.layerL.Unlock()
			}
		}()
	}

	// Create new roLayer
	layer := &roLayer{
		parent:         p,
		cacheID:        graphID,
		referenceCount: 1,
		layerStore:     ls,
		references:     map[Layer]struct{}{},
		diffID:         diffID,
		size:           size,
		chainID:        createChainIDFromParent(parent, diffID),
	}

	ls.layerL.Lock()
	defer ls.layerL.Unlock()

	if existingLayer := ls.get(layer.chainID); existingLayer != nil {
		// Set error for cleanup, but do not return
		err = errors.New("layer already exists")
		return existingLayer.getReference(), nil
	}

	tx, err := ls.store.StartTransaction()
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			log.G(context.TODO()).Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err)
			if err := tx.Cancel(); err != nil {
				log.G(context.TODO()).Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
			}
		}
	}()

	tsw, err := tx.TarSplitWriter(false)
	if err != nil {
		return nil, err
	}
	defer tsw.Close()
	tdf, err := os.Open(tarDataFile)
	if err != nil {
		return nil, err
	}
	defer tdf.Close()
	_, err = io.Copy(tsw, tdf)
	if err != nil {
		return nil, err
	}

	if err = storeLayer(tx, layer); err != nil {
		return nil, err
	}

	if err = tx.Commit(layer.chainID); err != nil {
		return nil, err
	}

	ls.layerMap[layer.chainID] = layer

	return layer.getReference(), nil
}
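
// unpackSizeCounter wraps a tar-split storage.Unpacker and accumulates the
// Size of every entry it returns into *size.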
type unpackSizeCounter struct {
	unpacker storage.Unpacker
	size     *int64
}
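
// Next returns the next entry from the wrapped unpacker and, on success, adds
// its Size to the running total.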
func (u *unpackSizeCounter) Next() (*storage.Entry, error) {
	e, err := u.unpacker.Next()
	if err == nil && u.size != nil {
		*u.size += e.Size
	}
	return e, err
}
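
// packSizeCounter wraps a tar-split storage.Packer and accumulates the Size
// of every entry it packs into *size.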
type packSizeCounter struct {
	packer storage.Packer
	size   *int64
}
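
// AddEntry forwards the entry to the wrapped packer and, on success, adds its
// Size to the running total.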
func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) {
	n, err := p.packer.AddEntry(e)
	if err == nil && p.size != nil {
		*p.size += e.Size
	}
	return n, err
}