package layer // import "github.com/docker/docker/layer"

import (
	"compress/gzip"
	"context"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"

	"github.com/containerd/log"
	"github.com/docker/distribution"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

var (
	stringIDRegexp      = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)
	supportedAlgorithms = []digest.Algorithm{
		digest.SHA256,
		// digest.SHA384, // Currently not used
		// digest.SHA512, // Currently not used
	}
)

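// fileMetadataStore keeps layer and mount metadata as plain files under a
// single root directory on disk.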
type fileMetadataStore struct {
	root string
}

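// fileMetadataTransaction stages layer metadata writes in an atomic write
// set until they are committed to, or cancelled from, the store.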
type fileMetadataTransaction struct {
	store *fileMetadataStore
	ws    *ioutils.AtomicWriteSet
}

// newFSMetadataStore returns an instance of a metadata store
// which is backed by files on disk using the provided root
// as the root of metadata files.
func newFSMetadataStore(root string) (*fileMetadataStore, error) {
	if err := os.MkdirAll(root, 0o700); err != nil {
		return nil, err
	}
	return &fileMetadataStore{
		root: root,
	}, nil
}

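// getLayerDirectory returns the on-disk directory for a layer, laid out as
// <root>/<algorithm>/<encoded digest>.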
func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
	dgst := digest.Digest(layer)
	return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Encoded())
}

func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
	return filepath.Join(fms.getLayerDirectory(layer), filename)
}

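// getMountDirectory returns the on-disk directory for a mount, laid out as
// <root>/mounts/<mount name>.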
func (fms *fileMetadataStore) getMountDirectory(mount string) string {
	return filepath.Join(fms.root, "mounts", mount)
}

func (fms *fileMetadataStore) getMountFilename(mount, filename string) string {
	return filepath.Join(fms.getMountDirectory(mount), filename)
}

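// StartTransaction begins a new metadata transaction backed by an atomic
// write set staged under <root>/tmp. Staged writes only become visible once
// Commit is called with the layer's ChainID; Cancel discards them.
//
// Illustrative sketch only (error handling elided; fms, diffID, cacheID and
// chainID are hypothetical caller-side values):
//
//	tx, _ := fms.StartTransaction()
//	_ = tx.SetDiffID(diffID)
//	_ = tx.SetCacheID(cacheID)
//	if err := tx.Commit(chainID); err != nil {
//		_ = tx.Cancel()
//	}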
func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) {
	tmpDir := filepath.Join(fms.root, "tmp")
	if err := os.MkdirAll(tmpDir, 0o755); err != nil {
		return nil, err
	}
	ws, err := ioutils.NewAtomicWriteSet(tmpDir)
	if err != nil {
		return nil, err
	}
	return &fileMetadataTransaction{
		store: fms,
		ws:    ws,
	}, nil
}

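// SetSize stages the layer size, in bytes, as the decimal string contents of
// the "size" file.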
func (fm *fileMetadataTransaction) SetSize(size int64) error {
	return fm.ws.WriteFile("size", []byte(strconv.FormatInt(size, 10)), 0o644)
}

func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
	return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0o644)
}

func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
	return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0o644)
}

func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
	return fm.ws.WriteFile("cache-id", []byte(cacheID), 0o644)
}

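// SetDescriptor stages the distribution descriptor for the layer as JSON in
// the "descriptor.json" file.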
func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
	jsonRef, err := json.Marshal(ref)
	if err != nil {
		return err
	}
	return fm.ws.WriteFile("descriptor.json", jsonRef, 0o644)
}

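// TarSplitWriter returns a writer for the staged "tar-split.json.gz" file.
// When compressInput is true the input is gzip-compressed on the way in;
// otherwise the bytes are written as-is. Closing the returned writer closes
// both the gzip writer (if any) and the underlying file.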
func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
	f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return nil, err
	}
	var wc io.WriteCloser
	if compressInput {
		wc = gzip.NewWriter(f)
	} else {
		wc = f
	}
	return ioutils.NewWriteCloserWrapper(wc, func() error {
		wc.Close()
		return f.Close()
	}), nil
}

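// Commit moves the staged files into the layer's metadata directory,
// creating the parent directory for the layer's digest algorithm if needed.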
func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
	finalDir := fm.store.getLayerDirectory(layer)
	if err := os.MkdirAll(filepath.Dir(finalDir), 0o755); err != nil {
		return err
	}
	return fm.ws.Commit(finalDir)
}

func (fm *fileMetadataTransaction) Cancel() error {
	return fm.ws.Cancel()
}

func (fm *fileMetadataTransaction) String() string {
	return fm.ws.String()
}

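// GetSize reads the layer size from the "size" file and parses it as a
// base-10 int64.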
func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) {
	content, err := os.ReadFile(fms.getLayerFilename(layer, "size"))
	if err != nil {
		return 0, err
	}
	size, err := strconv.ParseInt(string(content), 10, 64)
	if err != nil {
		return 0, err
	}
	return size, nil
}

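// GetParent returns the parent ChainID recorded for the layer. A missing
// "parent" file is not an error; it means the layer has no parent and an
// empty ChainID is returned.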
func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
	content, err := os.ReadFile(fms.getLayerFilename(layer, "parent"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", err
	}
	dgst, err := digest.Parse(strings.TrimSpace(string(content)))
	if err != nil {
		return "", err
	}
	return ChainID(dgst), nil
}

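// GetDiffID reads and parses the layer's DiffID from the "diff" file.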
func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
	content, err := os.ReadFile(fms.getLayerFilename(layer, "diff"))
	if err != nil {
		return "", err
	}
	dgst, err := digest.Parse(strings.TrimSpace(string(content)))
	if err != nil {
		return "", err
	}
	return DiffID(dgst), nil
}

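// GetCacheID reads the layer's cache ID from the "cache-id" file; an empty
// value is rejected as invalid.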
func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
	contentBytes, err := os.ReadFile(fms.getLayerFilename(layer, "cache-id"))
	if err != nil {
		return "", err
	}
	content := strings.TrimSpace(string(contentBytes))
	if content == "" {
		return "", errors.Errorf("invalid cache id value")
	}
	return content, nil
}

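// GetDescriptor returns the distribution descriptor stored for the layer. A
// missing "descriptor.json" file yields an empty descriptor rather than an
// error.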
func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) {
	content, err := os.ReadFile(fms.getLayerFilename(layer, "descriptor.json"))
	if err != nil {
		if os.IsNotExist(err) {
			// Nothing is stored for this layer; an empty descriptor
			// represents that.
			return distribution.Descriptor{}, nil
		}
		return distribution.Descriptor{}, err
	}
	var ref distribution.Descriptor
	err = json.Unmarshal(content, &ref)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	return ref, err
}

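// TarSplitReader returns a reader that decompresses the layer's
// "tar-split.json.gz" file. Closing the returned reader closes both the
// gzip reader and the underlying file.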
func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
	fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz"))
	if err != nil {
		return nil, err
	}
	f, err := gzip.NewReader(fz)
	if err != nil {
		fz.Close()
		return nil, err
	}
	return ioutils.NewReadCloserWrapper(f, func() error {
		f.Close()
		return fz.Close()
	}), nil
}

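// SetMountID records the mount ID for the named mount in its "mount-id"
// file, creating the mount directory if needed.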
func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
	if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil {
		return err
	}
	return os.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0o644)
}

func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
	if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil {
		return err
	}
	return os.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0o644)
}

func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
	if err := os.MkdirAll(fms.getMountDirectory(mount), 0o755); err != nil {
		return err
	}
	return os.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0o644)
}

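// GetMountID returns the mount ID recorded for the named mount, validating
// that it is a 64-character hex ID (optionally suffixed with "-init").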
func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
	contentBytes, err := os.ReadFile(fms.getMountFilename(mount, "mount-id"))
	if err != nil {
		return "", err
	}
	content := strings.TrimSpace(string(contentBytes))
	if !stringIDRegexp.MatchString(content) {
		return "", errors.New("invalid mount id value")
	}
	return content, nil
}

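// GetInitID returns the init layer ID recorded for the named mount. A
// missing "init-id" file is not an error; an empty string is returned
// instead.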
func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
	contentBytes, err := os.ReadFile(fms.getMountFilename(mount, "init-id"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", err
	}
	content := strings.TrimSpace(string(contentBytes))
	if !stringIDRegexp.MatchString(content) {
		return "", errors.New("invalid init id value")
	}
	return content, nil
}

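// GetMountParent returns the parent ChainID recorded for the named mount, or
// an empty ChainID if no "parent" file exists.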
func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
	content, err := os.ReadFile(fms.getMountFilename(mount, "parent"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", err
	}
	dgst, err := digest.Parse(strings.TrimSpace(string(content)))
	if err != nil {
		return "", err
	}
	return ChainID(dgst), nil
}

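// getOrphan scans the metadata root for layer directories carrying a
// "-removing" suffix and returns a roLayer for each one, pairing its digest
// with the cache ID recorded on disk. Entries with an invalid digest or a
// missing or empty cache ID are skipped.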
func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) {
	var orphanLayers []roLayer
	for _, algorithm := range supportedAlgorithms {
		fileInfos, err := os.ReadDir(filepath.Join(fms.root, string(algorithm)))
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			return nil, err
		}
		for _, fi := range fileInfos {
			if !fi.IsDir() || !strings.HasSuffix(fi.Name(), "-removing") {
				continue
			}
			// At this stage, the fi.Name() value looks like <digest>-<random>-removing.
			// Split on '-' to get the digest value.
			nameSplit := strings.Split(fi.Name(), "-")
			dgst := digest.NewDigestFromEncoded(algorithm, nameSplit[0])
			if err := dgst.Validate(); err != nil {
				log.G(context.TODO()).WithError(err).WithField("digest", string(algorithm)+":"+nameSplit[0]).Debug("ignoring invalid digest")
				continue
			}
			chainFile := filepath.Join(fms.root, string(algorithm), fi.Name(), "cache-id")
			contentBytes, err := os.ReadFile(chainFile)
			if err != nil {
				if !os.IsNotExist(err) {
					log.G(context.TODO()).WithError(err).WithField("digest", dgst).Error("failed to read cache ID")
				}
				continue
			}
			cacheID := strings.TrimSpace(string(contentBytes))
			if cacheID == "" {
				log.G(context.TODO()).Error("invalid cache ID")
				continue
			}
			l := &roLayer{
				chainID: ChainID(dgst),
				cacheID: cacheID,
			}
			orphanLayers = append(orphanLayers, *l)
		}
	}
	return orphanLayers, nil
}

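// List enumerates the metadata store, returning the ChainIDs of all layers
// with a valid digest directory and the names of all mount directories.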
func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
	var ids []ChainID
	for _, algorithm := range supportedAlgorithms {
		fileInfos, err := os.ReadDir(filepath.Join(fms.root, string(algorithm)))
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			return nil, nil, err
		}
		for _, fi := range fileInfos {
			if fi.IsDir() && fi.Name() != "mounts" {
				dgst := digest.NewDigestFromEncoded(algorithm, fi.Name())
				if err := dgst.Validate(); err != nil {
					log.G(context.TODO()).Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
				} else {
					ids = append(ids, ChainID(dgst))
				}
			}
		}
	}

	fileInfos, err := os.ReadDir(filepath.Join(fms.root, "mounts"))
	if err != nil {
		if os.IsNotExist(err) {
			return ids, []string{}, nil
		}
		return nil, nil, err
	}

	var mounts []string
	for _, fi := range fileInfos {
		if fi.IsDir() {
			mounts = append(mounts, fi.Name())
		}
	}
	return ids, mounts, nil
}

// Remove deletes the layerdb directory for the given layer, but only if it
// has been marked for removal (named with a "-removing" suffix) and its
// recorded cache ID matches the requested cache ID.
func (fms *fileMetadataStore) Remove(layer ChainID, cache string) error {
	dgst := digest.Digest(layer)
	files, err := os.ReadDir(filepath.Join(fms.root, string(dgst.Algorithm())))
	if err != nil {
		return err
	}
	for _, f := range files {
		if !strings.HasSuffix(f.Name(), "-removing") || !strings.HasPrefix(f.Name(), dgst.Encoded()) {
			continue
		}

		// Make sure we only remove the layerdb folder that points to the
		// requested cache ID.
		dir := filepath.Join(fms.root, string(dgst.Algorithm()), f.Name())
		chainFile := filepath.Join(dir, "cache-id")
		contentBytes, err := os.ReadFile(chainFile)
		if err != nil {
			log.G(context.TODO()).WithError(err).WithField("file", chainFile).Error("cannot get cache ID")
			continue
		}
		cacheID := strings.TrimSpace(string(contentBytes))
		if cacheID != cache {
			continue
		}
		log.G(context.TODO()).Debugf("Removing folder: %s", dir)
		err = os.RemoveAll(dir)
		if err != nil && !os.IsNotExist(err) {
			log.G(context.TODO()).WithError(err).WithField("name", f.Name()).Error("cannot remove layer")
			continue
		}
	}
	return nil
}

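// RemoveMount deletes all metadata stored for the named mount.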
func (fms *fileMetadataStore) RemoveMount(mount string) error {
	return os.RemoveAll(fms.getMountDirectory(mount))
}