filestore.go

package layer // import "github.com/docker/docker/layer"

import (
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"

	"github.com/docker/distribution"
	"github.com/docker/docker/pkg/ioutils"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)

	supportedAlgorithms = []digest.Algorithm{
		digest.SHA256,
		// digest.SHA384, // Currently not used
		// digest.SHA512, // Currently not used
	}
)

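// fileMetadataStore keeps layer and mount metadata in plain files under a
// root directory on disk.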
type fileMetadataStore struct {
	root string
}

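// fileMetadataTransaction buffers writes for a single layer's metadata in an
// atomic write set until Commit or Cancel is called.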
type fileMetadataTransaction struct {
	store *fileMetadataStore
	ws    *ioutils.AtomicWriteSet
}

// newFSMetadataStore returns an instance of a metadata store
// which is backed by files on disk using the provided root
// as the root of metadata files.
func newFSMetadataStore(root string) (*fileMetadataStore, error) {
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	return &fileMetadataStore{
		root: root,
	}, nil
}

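// getLayerDirectory returns the metadata directory for a layer, laid out as
// <root>/<algorithm>/<hex>.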
func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
	dgst := digest.Digest(layer)
	return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex())
}

func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
	return filepath.Join(fms.getLayerDirectory(layer), filename)
}

func (fms *fileMetadataStore) getMountDirectory(mount string) string {
	return filepath.Join(fms.root, "mounts", mount)
}

func (fms *fileMetadataStore) getMountFilename(mount, filename string) string {
	return filepath.Join(fms.getMountDirectory(mount), filename)
}

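// StartTransaction begins a new metadata transaction backed by an atomic
// write set under the store's "tmp" directory.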
func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) {
	tmpDir := filepath.Join(fms.root, "tmp")
	if err := os.MkdirAll(tmpDir, 0755); err != nil {
		return nil, err
	}
	ws, err := ioutils.NewAtomicWriteSet(tmpDir)
	if err != nil {
		return nil, err
	}
	return &fileMetadataTransaction{
		store: fms,
		ws:    ws,
	}, nil
}

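// SetSize records the layer size in the transaction's "size" file.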
func (fm *fileMetadataTransaction) SetSize(size int64) error {
	content := fmt.Sprintf("%d", size)
	return fm.ws.WriteFile("size", []byte(content), 0644)
}

func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
	return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644)
}

func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
	return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644)
}

func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
	return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644)
}

func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
	jsonRef, err := json.Marshal(ref)
	if err != nil {
		return err
	}
	return fm.ws.WriteFile("descriptor.json", jsonRef, 0644)
}

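// TarSplitWriter returns a writer for the transaction's tar-split.json.gz
// file. If compressInput is true the data is gzip-compressed as it is
// written; otherwise it is written as-is.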
func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
	f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	var wc io.WriteCloser
	if compressInput {
		wc = gzip.NewWriter(f)
	} else {
		wc = f
	}

	return ioutils.NewWriteCloserWrapper(wc, func() error {
		wc.Close()
		return f.Close()
	}), nil
}

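// Commit moves the transaction's files into the final metadata directory for
// the given layer chain ID.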
func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
	finalDir := fm.store.getLayerDirectory(layer)
	if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil {
		return err
	}
	return fm.ws.Commit(finalDir)
}

func (fm *fileMetadataTransaction) Cancel() error {
	return fm.ws.Cancel()
}

func (fm *fileMetadataTransaction) String() string {
	return fm.ws.String()
}

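// GetSize reads the size recorded for the given layer.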
func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) {
	content, err := os.ReadFile(fms.getLayerFilename(layer, "size"))
	if err != nil {
		return 0, err
	}

	size, err := strconv.ParseInt(string(content), 10, 64)
	if err != nil {
		return 0, err
	}

	return size, nil
}

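// GetParent reads the parent chain ID for the given layer. A missing parent
// file is not an error; an empty ChainID is returned instead.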
func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
	content, err := os.ReadFile(fms.getLayerFilename(layer, "parent"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", err
	}

	dgst, err := digest.Parse(strings.TrimSpace(string(content)))
	if err != nil {
		return "", err
	}

	return ChainID(dgst), nil
}

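// GetDiffID reads the diff ID recorded for the given layer.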
func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
	content, err := os.ReadFile(fms.getLayerFilename(layer, "diff"))
	if err != nil {
		return "", err
	}

	dgst, err := digest.Parse(strings.TrimSpace(string(content)))
	if err != nil {
		return "", err
	}

	return DiffID(dgst), nil
}

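// GetCacheID reads the cache ID recorded for the given layer and returns an
// error if it is empty.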
func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
	contentBytes, err := os.ReadFile(fms.getLayerFilename(layer, "cache-id"))
	if err != nil {
		return "", err
	}
	content := strings.TrimSpace(string(contentBytes))

	if content == "" {
		return "", errors.Errorf("invalid cache id value")
	}

	return content, nil
}

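// GetDescriptor reads the distribution descriptor recorded for the given
// layer. A missing descriptor file is not an error; an empty descriptor is
// returned instead.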
func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) {
	content, err := os.ReadFile(fms.getLayerFilename(layer, "descriptor.json"))
	if err != nil {
		if os.IsNotExist(err) {
			// only return empty descriptor to represent what is stored
			return distribution.Descriptor{}, nil
		}
		return distribution.Descriptor{}, err
	}

	var ref distribution.Descriptor
	err = json.Unmarshal(content, &ref)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	return ref, err
}

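// TarSplitReader opens the layer's tar-split.json.gz file and returns a
// reader over the decompressed tar-split data.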
func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
	fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz"))
	if err != nil {
		return nil, err
	}
	f, err := gzip.NewReader(fz)
	if err != nil {
		fz.Close()
		return nil, err
	}

	return ioutils.NewReadCloserWrapper(f, func() error {
		f.Close()
		return fz.Close()
	}), nil
}

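// SetMountID records the mount ID for the given mount.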
func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
		return err
	}
	return os.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644)
}

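// SetInitID records the init layer ID for the given mount.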
func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
		return err
	}
	return os.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644)
}

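// SetMountParent records the parent chain ID for the given mount.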
func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
		return err
	}
	return os.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644)
}

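// GetMountID reads the mount ID for the given mount and validates its format.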
func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
	contentBytes, err := os.ReadFile(fms.getMountFilename(mount, "mount-id"))
	if err != nil {
		return "", err
	}
	content := strings.TrimSpace(string(contentBytes))

	if !stringIDRegexp.MatchString(content) {
		return "", errors.New("invalid mount id value")
	}

	return content, nil
}

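// GetInitID reads the init layer ID for the given mount. A missing init-id
// file is not an error; an empty string is returned instead.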
func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
	contentBytes, err := os.ReadFile(fms.getMountFilename(mount, "init-id"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", err
	}
	content := strings.TrimSpace(string(contentBytes))

	if !stringIDRegexp.MatchString(content) {
		return "", errors.New("invalid init id value")
	}

	return content, nil
}

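// GetMountParent reads the parent chain ID for the given mount. A missing
// parent file is not an error; an empty ChainID is returned instead.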
func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
	content, err := os.ReadFile(fms.getMountFilename(mount, "parent"))
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil
		}
		return "", err
	}

	dgst, err := digest.Parse(strings.TrimSpace(string(content)))
	if err != nil {
		return "", err
	}

	return ChainID(dgst), nil
}

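// getOrphan scans the metadata root for layer directories marked for removal
// (suffixed with "-removing") and returns them as roLayer values.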
func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) {
	var orphanLayers []roLayer
	for _, algorithm := range supportedAlgorithms {
		fileInfos, err := os.ReadDir(filepath.Join(fms.root, string(algorithm)))
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			return nil, err
		}

		for _, fi := range fileInfos {
			if !fi.IsDir() || !strings.HasSuffix(fi.Name(), "-removing") {
				continue
			}
			// At this stage, fi.Name value looks like <digest>-<random>-removing
			// Split on '-' to get the digest value.
			nameSplit := strings.Split(fi.Name(), "-")
			dgst := digest.NewDigestFromEncoded(algorithm, nameSplit[0])
			if err := dgst.Validate(); err != nil {
				logrus.WithError(err).WithField("digest", string(algorithm)+":"+nameSplit[0]).Debug("ignoring invalid digest")
				continue
			}

			chainFile := filepath.Join(fms.root, string(algorithm), fi.Name(), "cache-id")
			contentBytes, err := os.ReadFile(chainFile)
			if err != nil {
				if !os.IsNotExist(err) {
					logrus.WithError(err).WithField("digest", dgst).Error("failed to read cache ID")
				}
				continue
			}
			cacheID := strings.TrimSpace(string(contentBytes))
			if cacheID == "" {
				logrus.Error("invalid cache ID")
				continue
			}

			l := &roLayer{
				chainID: ChainID(dgst),
				cacheID: cacheID,
			}
			orphanLayers = append(orphanLayers, *l)
		}
	}

	return orphanLayers, nil
}

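// List returns all layer chain IDs and mount names known to the store.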
func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
	var ids []ChainID
	for _, algorithm := range supportedAlgorithms {
		fileInfos, err := os.ReadDir(filepath.Join(fms.root, string(algorithm)))
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			return nil, nil, err
		}

		for _, fi := range fileInfos {
			if fi.IsDir() && fi.Name() != "mounts" {
				dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
				if err := dgst.Validate(); err != nil {
					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
				} else {
					ids = append(ids, ChainID(dgst))
				}
			}
		}
	}

	fileInfos, err := os.ReadDir(filepath.Join(fms.root, "mounts"))
	if err != nil {
		if os.IsNotExist(err) {
			return ids, []string{}, nil
		}
		return nil, nil, err
	}

	var mounts []string
	for _, fi := range fileInfos {
		if fi.IsDir() {
			mounts = append(mounts, fi.Name())
		}
	}

	return ids, mounts, nil
}

// Remove removes a layer's metadata (layerdb) folder if it has been marked
// for removal and its cache ID matches the requested one.
func (fms *fileMetadataStore) Remove(layer ChainID, cache string) error {
	dgst := digest.Digest(layer)
	files, err := os.ReadDir(filepath.Join(fms.root, string(dgst.Algorithm())))
	if err != nil {
		return err
	}
	for _, f := range files {
		if !strings.HasSuffix(f.Name(), "-removing") || !strings.HasPrefix(f.Name(), dgst.Encoded()) {
			continue
		}

		// Make sure we only remove the layerdb folder that points to the
		// requested cache ID.
		dir := filepath.Join(fms.root, string(dgst.Algorithm()), f.Name())
		chainFile := filepath.Join(dir, "cache-id")
		contentBytes, err := os.ReadFile(chainFile)
		if err != nil {
			logrus.WithError(err).WithField("file", chainFile).Error("cannot get cache ID")
			continue
		}
		cacheID := strings.TrimSpace(string(contentBytes))
		if cacheID != cache {
			continue
		}

		logrus.Debugf("Removing folder: %s", dir)
		err = os.RemoveAll(dir)
		if err != nil && !os.IsNotExist(err) {
			logrus.WithError(err).WithField("name", f.Name()).Error("cannot remove layer")
			continue
		}
	}
	return nil
}

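// RemoveMount deletes all metadata stored for the given mount.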
func (fms *fileMetadataStore) RemoveMount(mount string) error {
	return os.RemoveAll(fms.getMountDirectory(mount))
}