
Commit 355cf8e

tscolari authored and williammartin committed
Add support to remote layers on OCI images
* Rename LayerDigest to LayerInfo
* Add StringToURL helper function
* Move SystemContext configuration to the create command
* Add `create.blobstore_client_certificates_path` property to config file
* Add TLS properties to SystemContext

[finishes #150404433]

Signed-off-by: Georgi Sabev <[email protected]>
Signed-off-by: Danail Branekov <[email protected]>
Signed-off-by: Will Martin <[email protected]>
1 parent 15202df commit 355cf8e
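The commit message mentions a new StringToURL helper, which is not part of the diff shown below. As a rough illustration only (the real helper lives in a file outside this excerpt, so its signature here is an assumption), such a helper would likely just wrap url.Parse so that callers can branch on the scheme, much like the BaseImagePuller.fetcher method that this commit deletes:

// Hypothetical sketch, not the code added by this commit: the real StringToURL
// helper is not part of this diff, so the signature below is an assumption.
package main

import (
	"fmt"
	"net/url"
)

// StringToURL parses a base image reference string. A local tar path parses to
// a URL with an empty Scheme, which is how the deleted BaseImagePuller.fetcher
// method used to tell tar images apart from registry images.
func StringToURL(s string) (*url.URL, error) {
	u, err := url.Parse(s)
	if err != nil {
		return nil, fmt.Errorf("parsing URL `%s`: %s", s, err)
	}
	return u, nil
}

func main() {
	for _, ref := range []string{"/tmp/rootfs.tar", "docker:///ubuntu:latest"} {
		u, err := StringToURL(ref)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> scheme %q\n", ref, u.Scheme)
	}
}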


42 files changed: +810 −735 lines changed

base_image_puller/base_image_puller.go

Lines changed: 53 additions & 62 deletions
@@ -36,17 +36,18 @@ type UnpackSpec struct {
 	BaseDirectory string
 }
 
-type LayerDigest struct {
+type LayerInfo struct {
 	BlobID        string
 	ChainID       string
 	ParentChainID string
 	Size          int64
 	BaseDirectory string
+	URLs          []string
 }
 
 type BaseImageInfo struct {
-	LayersDigest []LayerDigest
-	Config       specsv1.Image
+	LayerInfos []LayerInfo
+	Config     specsv1.Image
 }
 
 type VolumeMeta struct {
@@ -55,7 +56,7 @@ type VolumeMeta struct {
 
 type Fetcher interface {
 	BaseImageInfo(logger lager.Logger, baseImageURL *url.URL) (BaseImageInfo, error)
-	StreamBlob(logger lager.Logger, baseImageURL *url.URL, source string) (io.ReadCloser, int64, error)
+	StreamBlob(logger lager.Logger, baseImageURL *url.URL, layerInfo LayerInfo) (io.ReadCloser, int64, error)
 }
 
 type DependencyRegisterer interface {
@@ -82,19 +83,17 @@ type VolumeDriver interface {
 }
 
 type BaseImagePuller struct {
-	tarFetcher           Fetcher
-	layerFetcher         Fetcher
+	fetcher              Fetcher
 	unpacker             Unpacker
 	volumeDriver         VolumeDriver
 	dependencyRegisterer DependencyRegisterer
 	metricsEmitter       groot.MetricsEmitter
 	locksmith            groot.Locksmith
 }
 
-func NewBaseImagePuller(tarFetcher, layerFetcher Fetcher, unpacker Unpacker, volumeDriver VolumeDriver, dependencyRegisterer DependencyRegisterer, metricsEmitter groot.MetricsEmitter, locksmith groot.Locksmith) *BaseImagePuller {
+func NewBaseImagePuller(fetcher Fetcher, unpacker Unpacker, volumeDriver VolumeDriver, dependencyRegisterer DependencyRegisterer, metricsEmitter groot.MetricsEmitter, locksmith groot.Locksmith) *BaseImagePuller {
 	return &BaseImagePuller{
-		tarFetcher:           tarFetcher,
-		layerFetcher:         layerFetcher,
+		fetcher:              fetcher,
 		unpacker:             unpacker,
 		volumeDriver:         volumeDriver,
 		dependencyRegisterer: dependencyRegisterer,
@@ -108,21 +107,21 @@ func (p *BaseImagePuller) Pull(logger lager.Logger, spec groot.BaseImageSpec) (g
 	logger.Info("starting")
 	defer logger.Info("ending")
 
-	baseImageInfo, err := p.fetcher(spec.BaseImageSrc).BaseImageInfo(logger, spec.BaseImageSrc)
+	baseImageInfo, err := p.fetcher.BaseImageInfo(logger, spec.BaseImageSrc)
 	if err != nil {
-		return groot.BaseImage{}, errorspkg.Wrap(err, "fetching list of digests")
+		return groot.BaseImage{}, errorspkg.Wrap(err, "fetching list of layer infos")
 	}
-	logger.Debug("fetched-layers-digests", lager.Data{"digests": baseImageInfo.LayersDigest})
+	logger.Debug("fetched-layer-infos", lager.Data{"infos": baseImageInfo.LayerInfos})
 
-	if err = p.quotaExceeded(logger, baseImageInfo.LayersDigest, spec); err != nil {
+	if err = p.quotaExceeded(logger, baseImageInfo.LayerInfos, spec); err != nil {
 		return groot.BaseImage{}, err
 	}
 
-	err = p.buildLayer(logger, len(baseImageInfo.LayersDigest)-1, baseImageInfo.LayersDigest, spec)
+	err = p.buildLayer(logger, len(baseImageInfo.LayerInfos)-1, baseImageInfo.LayerInfos, spec)
 	if err != nil {
 		return groot.BaseImage{}, err
 	}
-	chainIDs := p.chainIDs(baseImageInfo.LayersDigest)
+	chainIDs := p.chainIDs(baseImageInfo.LayerInfos)
 
 	baseImageRefName := fmt.Sprintf(BaseImageReferenceFormat, spec.BaseImageSrc.String())
 	if err := p.dependencyRegisterer.Register(baseImageRefName, chainIDs); err != nil {
@@ -136,20 +135,12 @@ func (p *BaseImagePuller) Pull(logger lager.Logger, spec groot.BaseImageSpec) (g
 	return baseImage, nil
 }
 
-func (p *BaseImagePuller) fetcher(baseImageURL *url.URL) Fetcher {
-	if baseImageURL.Scheme == "" {
-		return p.tarFetcher
-	}
-
-	return p.layerFetcher
-}
-
-func (p *BaseImagePuller) quotaExceeded(logger lager.Logger, layersDigest []LayerDigest, spec groot.BaseImageSpec) error {
+func (p *BaseImagePuller) quotaExceeded(logger lager.Logger, layerInfos []LayerInfo, spec groot.BaseImageSpec) error {
 	if spec.ExcludeBaseImageFromQuota || spec.DiskLimit == 0 {
 		return nil
 	}
 
-	totalSize := p.layersSize(layersDigest)
+	totalSize := p.layersSize(layerInfos)
 	if totalSize > spec.DiskLimit {
 		err := errorspkg.Errorf("layers exceed disk quota %d/%d bytes", totalSize, spec.DiskLimit)
 		logger.Error("blob-manifest-size-check-failed", err, lager.Data{
@@ -163,10 +154,10 @@ func (p *BaseImagePuller) quotaExceeded(logger lager.Logger, layersDigest []Laye
 	return nil
 }
 
-func (p *BaseImagePuller) chainIDs(layersDigest []LayerDigest) []string {
+func (p *BaseImagePuller) chainIDs(layerInfos []LayerInfo) []string {
 	chainIDs := []string{}
-	for _, layerDigest := range layersDigest {
-		chainIDs = append(chainIDs, layerDigest.ChainID)
+	for _, layerInfo := range layerInfos {
+		chainIDs = append(chainIDs, layerInfo.ChainID)
 	}
 	return chainIDs
 }
@@ -184,35 +175,35 @@ func (p *BaseImagePuller) volumeExists(logger lager.Logger, chainID string) bool
 	return false
 }
 
-func (p *BaseImagePuller) buildLayer(logger lager.Logger, index int, layersDigests []LayerDigest, spec groot.BaseImageSpec) error {
+func (p *BaseImagePuller) buildLayer(logger lager.Logger, index int, layerInfos []LayerInfo, spec groot.BaseImageSpec) error {
 	if index < 0 {
 		return nil
 	}
 
-	digest := layersDigests[index]
+	layerInfo := layerInfos[index]
 	logger = logger.Session("build-layer", lager.Data{
-		"blobID":        digest.BlobID,
-		"chainID":       digest.ChainID,
-		"parentChainID": digest.ParentChainID,
+		"blobID":        layerInfo.BlobID,
+		"chainID":       layerInfo.ChainID,
+		"parentChainID": layerInfo.ParentChainID,
 	})
-	if p.volumeExists(logger, digest.ChainID) {
+	if p.volumeExists(logger, layerInfo.ChainID) {
 		return nil
 	}
 
-	lockFile, err := p.locksmith.Lock(digest.ChainID)
+	lockFile, err := p.locksmith.Lock(layerInfo.ChainID)
 	if err != nil {
 		return errorspkg.Wrap(err, "acquiring lock")
 	}
 	defer p.locksmith.Unlock(lockFile)
 
-	if p.volumeExists(logger, digest.ChainID) {
+	if p.volumeExists(logger, layerInfo.ChainID) {
 		return nil
 	}
 
 	downloadChan := make(chan downloadReturn, 1)
-	go p.downloadLayer(logger, spec, digest, downloadChan)
+	go p.downloadLayer(logger, spec, layerInfo, downloadChan)
 
-	if err := p.buildLayer(logger, index-1, layersDigests, spec); err != nil {
+	if err := p.buildLayer(logger, index-1, layerInfos, spec); err != nil {
 		return err
 	}
 
@@ -223,27 +214,27 @@ func (p *BaseImagePuller) buildLayer(logger lager.Logger, index int, layersDiges
 
 	defer downloadResult.Stream.Close()
 
-	var parentDigest LayerDigest
+	var parentLayerInfo LayerInfo
 	if index > 0 {
-		parentDigest = layersDigests[index-1]
+		parentLayerInfo = layerInfos[index-1]
 	}
-	return p.unpackLayer(logger, digest, parentDigest, spec, downloadResult.Stream)
+	return p.unpackLayer(logger, layerInfo, parentLayerInfo, spec, downloadResult.Stream)
 }
 
 type downloadReturn struct {
 	Stream io.ReadCloser
 	Err    error
 }
 
-func (p *BaseImagePuller) downloadLayer(logger lager.Logger, spec groot.BaseImageSpec, digest LayerDigest, downloadChan chan downloadReturn) {
-	logger = logger.Session("downloading-layer", lager.Data{"LayerDigest": digest})
+func (p *BaseImagePuller) downloadLayer(logger lager.Logger, spec groot.BaseImageSpec, layerInfo LayerInfo, downloadChan chan downloadReturn) {
+	logger = logger.Session("downloading-layer", lager.Data{"LayerInfo": layerInfo})
 	logger.Debug("starting")
 	defer logger.Debug("ending")
 	defer p.metricsEmitter.TryEmitDurationFrom(logger, MetricsDownloadTimeName, time.Now())
 
-	stream, size, err := p.fetcher(spec.BaseImageSrc).StreamBlob(logger, spec.BaseImageSrc, digest.BlobID)
+	stream, size, err := p.fetcher.StreamBlob(logger, spec.BaseImageSrc, layerInfo)
 	if err != nil {
-		err = errorspkg.Wrapf(err, "streaming blob `%s`", digest.BlobID)
+		err = errorspkg.Wrapf(err, "streaming blob `%s`", layerInfo.BlobID)
 	}
 
 	logger.Debug("got-stream-for-blob", lager.Data{
@@ -255,12 +246,12 @@ func (p *BaseImagePuller) downloadLayer(logger lager.Logger, spec groot.BaseImag
 	downloadChan <- downloadReturn{Stream: stream, Err: err}
 }
 
-func (p *BaseImagePuller) unpackLayer(logger lager.Logger, digest, parentDigest LayerDigest, spec groot.BaseImageSpec, stream io.ReadCloser) error {
-	logger = logger.Session("unpacking-layer", lager.Data{"LayerDigest": digest})
+func (p *BaseImagePuller) unpackLayer(logger lager.Logger, layerInfo, parentLayerInfo LayerInfo, spec groot.BaseImageSpec, stream io.ReadCloser) error {
+	logger = logger.Session("unpacking-layer", lager.Data{"LayerInfo": layerInfo})
 	logger.Debug("starting")
 	defer logger.Debug("ending")
 
-	tempVolumeName, volumePath, err := p.createTemporaryVolumeDirectory(logger, digest, spec)
+	tempVolumeName, volumePath, err := p.createTemporaryVolumeDirectory(logger, layerInfo, spec)
 	if err != nil {
 		return err
 	}
@@ -270,25 +261,25 @@ func (p *BaseImagePuller) unpackLayer(logger lager.Logger, digest, parentDigest
 		Stream:        stream,
 		UIDMappings:   spec.UIDMappings,
 		GIDMappings:   spec.GIDMappings,
-		BaseDirectory: digest.BaseDirectory,
+		BaseDirectory: layerInfo.BaseDirectory,
 	}
 
-	volSize, err := p.unpackLayerToTemporaryDirectory(logger, unpackSpec, digest, parentDigest)
+	volSize, err := p.unpackLayerToTemporaryDirectory(logger, unpackSpec, layerInfo, parentLayerInfo)
 	if err != nil {
 		return err
 	}
 
-	return p.finalizeVolume(logger, tempVolumeName, volumePath, digest.ChainID, volSize)
+	return p.finalizeVolume(logger, tempVolumeName, volumePath, layerInfo.ChainID, volSize)
 }
 
-func (p *BaseImagePuller) createTemporaryVolumeDirectory(logger lager.Logger, digest LayerDigest, spec groot.BaseImageSpec) (string, string, error) {
-	tempVolumeName := fmt.Sprintf("%s-incomplete-%d-%d", digest.ChainID, time.Now().UnixNano(), rand.Int())
+func (p *BaseImagePuller) createTemporaryVolumeDirectory(logger lager.Logger, layerInfo LayerInfo, spec groot.BaseImageSpec) (string, string, error) {
+	tempVolumeName := fmt.Sprintf("%s-incomplete-%d-%d", layerInfo.ChainID, time.Now().UnixNano(), rand.Int())
 	volumePath, err := p.volumeDriver.CreateVolume(logger,
-		digest.ParentChainID,
+		layerInfo.ParentChainID,
 		tempVolumeName,
 	)
 	if err != nil {
-		return "", "", errorspkg.Wrapf(err, "creating volume for layer `%s`", digest.BlobID)
+		return "", "", errorspkg.Wrapf(err, "creating volume for layer `%s`", layerInfo.BlobID)
 	}
 	logger.Debug("volume-created", lager.Data{"volumePath": volumePath})
 
@@ -302,11 +293,11 @@ func (p *BaseImagePuller) createTemporaryVolumeDirectory(logger lager.Logger, di
 	return tempVolumeName, volumePath, nil
 }
 
-func (p *BaseImagePuller) unpackLayerToTemporaryDirectory(logger lager.Logger, unpackSpec UnpackSpec, digest, parentDigest LayerDigest) (volSize int64, err error) {
+func (p *BaseImagePuller) unpackLayerToTemporaryDirectory(logger lager.Logger, unpackSpec UnpackSpec, layerInfo, parentLayerInfo LayerInfo) (volSize int64, err error) {
 	defer p.metricsEmitter.TryEmitDurationFrom(logger, MetricsUnpackTimeName, time.Now())
 
 	if unpackSpec.BaseDirectory != "" {
-		parentPath, err := p.volumeDriver.VolumePath(logger, parentDigest.ChainID)
+		parentPath, err := p.volumeDriver.VolumePath(logger, parentLayerInfo.ChainID)
 		if err != nil {
 			return 0, err
 		}
@@ -318,10 +309,10 @@ func (p *BaseImagePuller) unpackLayerToTemporaryDirectory(logger lager.Logger, u
 
 	var unpackOutput UnpackOutput
 	if unpackOutput, err = p.unpacker.Unpack(logger, unpackSpec); err != nil {
-		if errD := p.volumeDriver.DestroyVolume(logger, digest.ChainID); errD != nil {
+		if errD := p.volumeDriver.DestroyVolume(logger, layerInfo.ChainID); errD != nil {
 			logger.Error("volume-cleanup-failed", errD)
 		}
-		return 0, errorspkg.Wrapf(err, "unpacking layer `%s`", digest.BlobID)
+		return 0, errorspkg.Wrapf(err, "unpacking layer `%s`", layerInfo.BlobID)
 	}
 
 	if err := p.volumeDriver.HandleOpaqueWhiteouts(logger, path.Base(unpackSpec.TargetPath), unpackOutput.OpaqueWhiteouts); err != nil {
@@ -346,10 +337,10 @@ func (p *BaseImagePuller) finalizeVolume(logger lager.Logger, tempVolumeName, vo
 	return nil
 }
 
-func (p *BaseImagePuller) layersSize(layerDigests []LayerDigest) int64 {
+func (p *BaseImagePuller) layersSize(layerInfos []LayerInfo) int64 {
 	var totalSize int64
-	for _, digest := range layerDigests {
-		totalSize += digest.Size
+	for _, layerInfo := range layerInfos {
+		totalSize += layerInfo.Size
 	}
 	return totalSize
 }
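Since NewBaseImagePuller now takes a single Fetcher, the tar-vs-registry dispatch that used to live in the deleted BaseImagePuller.fetcher method has to happen before the puller is constructed. Where exactly that wiring ends up is in files not shown in this excerpt; the sketch below, which uses simplified local stand-ins rather than the project's real packages, shows one way a composite fetcher could reproduce the old scheme check:

// Sketch only, with simplified local stand-ins for the real types (no lager
// logger, no registry client): one way a composite Fetcher could keep the
// tar-vs-registry dispatch that this commit removes from BaseImagePuller.
package main

import (
	"fmt"
	"io"
	"net/url"
	"strings"
)

// LayerInfo loosely mirrors the renamed struct above; URLs is the new field
// that lets a layer blob be fetched from remote locations.
type LayerInfo struct {
	BlobID string
	URLs   []string
}

// Fetcher mirrors the reduced interface (BaseImageInfo omitted for brevity).
type Fetcher interface {
	StreamBlob(baseImageURL *url.URL, layerInfo LayerInfo) (io.ReadCloser, int64, error)
}

type tarFetcher struct{}

func (tarFetcher) StreamBlob(u *url.URL, li LayerInfo) (io.ReadCloser, int64, error) {
	blob := "tar blob for " + u.Path
	return io.NopCloser(strings.NewReader(blob)), int64(len(blob)), nil
}

type layerFetcher struct{}

func (layerFetcher) StreamBlob(u *url.URL, li LayerInfo) (io.ReadCloser, int64, error) {
	// A remote-layer-aware fetcher could try li.URLs before the registry blob.
	blob := "registry blob " + li.BlobID
	return io.NopCloser(strings.NewReader(blob)), int64(len(blob)), nil
}

// compositeFetcher reproduces the scheme check from the deleted
// BaseImagePuller.fetcher method: an empty scheme means a local tar image.
type compositeFetcher struct {
	tar, layer Fetcher
}

func (c compositeFetcher) StreamBlob(u *url.URL, li LayerInfo) (io.ReadCloser, int64, error) {
	if u.Scheme == "" {
		return c.tar.StreamBlob(u, li)
	}
	return c.layer.StreamBlob(u, li)
}

func main() {
	f := compositeFetcher{tar: tarFetcher{}, layer: layerFetcher{}}
	for _, ref := range []string{"/tmp/rootfs.tar", "docker:///ubuntu:latest"} {
		u, err := url.Parse(ref)
		if err != nil {
			panic(err)
		}
		stream, size, _ := f.StreamBlob(u, LayerInfo{BlobID: "sha256:abc"})
		contents, _ := io.ReadAll(stream)
		fmt.Printf("%s -> %q (%d bytes)\n", ref, contents, size)
	}
}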
