From 492230e4b402bee640233cdfb962028b9a2b3f3c Mon Sep 17 00:00:00 2001 From: Harry Brundage Date: Sun, 5 May 2024 08:20:20 -0400 Subject: [PATCH] Implement a cache daemon that can move the cache into place quickly on demand - a long running background process we'll deploy as a daemonset on k8s nodes - moves the cache into place via hardlinking the golden copy from a shared location - operates using the host filesystem, and thus has pretty priviledged access --- internal/key/key.go | 2 +- internal/pb/cache.pb.go | 39 +++++++------ internal/pb/cache.proto | 2 +- pkg/api/cached.go | 104 ++++++++++++++--------------------- pkg/cached/cached.go | 6 +- pkg/cli/cachedaemon.go | 37 +++++-------- pkg/cli/populatediskcache.go | 15 ++--- pkg/client/client.go | 8 +-- test/cached_test.go | 82 +++++++++++++-------------- 9 files changed, 124 insertions(+), 171 deletions(-) diff --git a/internal/key/key.go b/internal/key/key.go index 44cd0ecd..70c90506 100644 --- a/internal/key/key.go +++ b/internal/key/key.go @@ -40,7 +40,7 @@ const ( Ignores = StringSliceKey("dl.ignores") DurationMS = DurationKey("dl.duration_ms") CloneToProject = Int64Key("dl.clone_to_project") - DownstreamID = StringKey("dl.downstream_id") + CachePath = StringKey("dl.cache_path") ) var ( diff --git a/internal/pb/cache.pb.go b/internal/pb/cache.pb.go index fe76d561..458fb1c4 100644 --- a/internal/pb/cache.pb.go +++ b/internal/pb/cache.pb.go @@ -25,7 +25,7 @@ type PopulateDiskCacheRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DownstreamId string `protobuf:"bytes,1,opt,name=downstream_id,json=downstreamId,proto3" json:"downstream_id,omitempty"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` } func (x *PopulateDiskCacheRequest) Reset() { @@ -60,9 +60,9 @@ func (*PopulateDiskCacheRequest) Descriptor() ([]byte, []int) { return file_internal_pb_cache_proto_rawDescGZIP(), []int{0} } -func (x *PopulateDiskCacheRequest) GetDownstreamId() string { +func (x *PopulateDiskCacheRequest) GetPath() string { if x != nil { - return x.DownstreamId + return x.Path } return "" } @@ -118,24 +118,23 @@ var File_internal_pb_cache_proto protoreflect.FileDescriptor var file_internal_pb_cache_proto_rawDesc = []byte{ 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x61, - 0x63, 0x68, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x22, 0x3f, 0x0a, + 0x63, 0x68, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x22, 0x2e, 0x0a, 0x18, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x43, 0x61, 0x63, - 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x6f, 0x77, - 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x22, 0x35, - 0x0a, 0x19, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x59, 0x0a, 0x05, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x50, - 0x0a, 0x11, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x12, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, - 0x65, 0x44, 0x69, 0x73, 
0x6b, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x44, - 0x69, 0x73, 0x6b, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, - 0x61, 0x64, 0x67, 0x65, 0x74, 0x2d, 0x69, 0x6e, 0x63, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x69, 0x6c, - 0x61, 0x67, 0x65, 0x72, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x35, 0x0a, + 0x19, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x59, 0x0a, 0x05, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x50, 0x0a, + 0x11, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x12, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x44, 0x69, 0x73, 0x6b, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x44, 0x69, + 0x73, 0x6b, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x61, + 0x64, 0x67, 0x65, 0x74, 0x2d, 0x69, 0x6e, 0x63, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x69, 0x6c, 0x61, + 0x67, 0x65, 0x72, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/internal/pb/cache.proto b/internal/pb/cache.proto index 7e9d143f..df257cd7 100644 --- a/internal/pb/cache.proto +++ b/internal/pb/cache.proto @@ -9,6 +9,6 @@ service Cache { returns (PopulateDiskCacheResponse); } -message PopulateDiskCacheRequest { string downstream_id = 1; } +message PopulateDiskCacheRequest { string path = 1; } message PopulateDiskCacheResponse { int64 version = 1; }; diff --git a/pkg/api/cached.go b/pkg/api/cached.go index afcc6051..c58ff3bb 100644 --- a/pkg/api/cached.go +++ b/pkg/api/cached.go @@ -8,8 +8,6 @@ import ( "fmt" "os" "path" - "path/filepath" - "regexp" "time" "github.com/gadget-inc/dateilager/internal/files" @@ -17,87 +15,76 @@ import ( "github.com/gadget-inc/dateilager/internal/logger" "github.com/gadget-inc/dateilager/internal/pb" "github.com/gadget-inc/dateilager/pkg/client" + "golang.org/x/sys/unix" ) -/** -* The set of paths configuring where a cache daemon will drop caches for downstream clients -* -* For example, if the paths have these values: -* StagingPath: /var/lib/dateilager/staging -* DownstreamsRootPath: /var/lib/dateilager/root -* DownstreamPathSuffix: /volumes/something -* -* the cache for a downstream with id=1234 will be placed at: -* /var/lib/dateilager/root/1234/volumes/something -* -* and the cache for a downstream with id=abcd will be placed at: -* /var/lib/dateilager/root/abcd/volumes/something -* -* Each will be hard linked from the golden copy stored in /var/lib/dateilager/staging/ - */ -type CachePaths struct { - // the root dir where we're going to stage downloaded caches */ - 
StagingPath string - // the root dir that holds one sub-directory per downstream client of this daemon */ - DownstreamsRootPath string - // a path to add to each downstream's dir when placing the cache - DownstreamPathSuffix string -} - type Cached struct { pb.UnimplementedCacheServer - Client *client.Client - Paths *CachePaths + Client *client.Client + StagingPath string // the current directory holding a fully formed downloaded cache currentDir string // the current version of the cache on disk at currentDir currentVersion int64 } -var validDownstreamIDRegexp = regexp.MustCompile(`^[a-zA-Z0-9-]+$`) - func (c *Cached) PopulateDiskCache(ctx context.Context, req *pb.PopulateDiskCacheRequest) (*pb.PopulateDiskCacheResponse, error) { - logger.Debug(ctx, "cached.PopulateDiskCache[Init]", - key.DownstreamID.Field(req.DownstreamId), - ) - // ensure potentially-hostile downstream id doesn't have `.` or other path traversal characters in it - if !validDownstreamIDRegexp.MatchString(req.DownstreamId) { - return nil, validationError(ctx, req, "invalid downstream id passed", errors.New("-")) + err := requireAdminAuth(ctx) + if err != nil { + return nil, err } - destination := path.Join(c.Paths.DownstreamsRootPath, req.DownstreamId, c.Paths.DownstreamPathSuffix) - destinationParentStat, err := os.Stat(filepath.Dir(destination)) + destination := req.Path + logger.Debug(ctx, "cached.PopulateDiskCache[Init]", + key.CachePath.Field(req.Path), + ) + version, err := c.WriteCache(destination) if err != nil { - return nil, validationError(ctx, req, "error validating destination directory", err) + return nil, err } - if !destinationParentStat.IsDir() { - return nil, validationError(ctx, req, "refusing to overwrite file with directory when building cache", err) - } + logger.Debug(ctx, "cached.PopulateDiskCache[Written]", + key.CachePath.Field(destination), + ) + return &pb.PopulateDiskCacheResponse{Version: version}, nil +} + +// check if the destination exists, and if so, if its writable +// hardlink the golden copy into this downstream's destination, creating it if need be +func (c *Cached) WriteCache(destination string) (int64, error) { if c.currentDir == "" { - return nil, validationError(ctx, req, "no cache prepared, currentDir is nil", err) + return -1, errors.New("no cache prepared, currentDir is nil") + } + + stat, err := os.Stat(destination) + if !os.IsNotExist(err) { + if err != nil { + return -1, fmt.Errorf("failed to stat cache destination %s: %v", destination, err) + } + + if !stat.IsDir() { + return -1, fmt.Errorf("failed to open cache destination %s for writing -- it is already a file", destination) + } + + if unix.Access(destination, unix.W_OK) != nil { + return -1, fmt.Errorf("failed to open cache destination %s for writing -- write permission denied", destination) + } } - // hardlink the golden copy into this downstream's destination, creating it if need be err = files.HardlinkDir(c.currentDir, destination) if err != nil { - return nil, validationError(ctx, req, "failed to hardlink cache", err) + return -1, fmt.Errorf("failed to hardlink cache to destination %s: %v", destination, err) } - - logger.Debug(ctx, "cached.PopulateDiskCache[Written]", - key.DownstreamID.Field(req.DownstreamId), - ) - - return &pb.PopulateDiskCacheResponse{Version: c.currentVersion}, nil + return c.currentVersion, nil } /** Fetch the cache into a spot in the staging dir */ func (c *Cached) Prepare(ctx context.Context) error { start := time.Now() - newDir := path.Join(c.Paths.StagingPath, randomString()) + newDir := 
path.Join(c.StagingPath, randomString()) version, count, err := c.Client.GetCache(ctx, newDir) if err != nil { return err @@ -118,14 +105,3 @@ func randomString() string { } return base64.URLEncoding.EncodeToString(randBytes) } - -func validationError(ctx context.Context, req *pb.PopulateDiskCacheRequest, msg string, err error) error { - logger.Warn( - ctx, - fmt.Sprintf("cached.PopulateDiskCache[error] %s: %v", msg, err), - key.DownstreamID.Field(req.DownstreamId), - ) - - // return no details to the caller as they are potentially an attacker from a sandboxed context - return errors.New("request failed") -} diff --git a/pkg/cached/cached.go b/pkg/cached/cached.go index 1a692c44..80cf16ac 100644 --- a/pkg/cached/cached.go +++ b/pkg/cached/cached.go @@ -25,7 +25,7 @@ type CacheServer struct { Cached *api.Cached } -func NewServer(ctx context.Context, client *client.Client, cert *tls.Certificate, paths *api.CachePaths) *CacheServer { +func NewServer(ctx context.Context, client *client.Client, cert *tls.Certificate, stagingPath string) *CacheServer { creds := credentials.NewServerTLSFromCert(cert) grpcServer := grpc.NewServer( @@ -50,8 +50,8 @@ func NewServer(ctx context.Context, client *client.Client, cert *tls.Certificate healthpb.RegisterHealthServer(grpcServer, healthServer) cached := &api.Cached{ - Client: client, - Paths: paths, + Client: client, + StagingPath: stagingPath, } pb.RegisterCacheServer(grpcServer, cached) diff --git a/pkg/cli/cachedaemon.go b/pkg/cli/cachedaemon.go index d8607be6..e2cb6f2f 100644 --- a/pkg/cli/cachedaemon.go +++ b/pkg/cli/cachedaemon.go @@ -15,7 +15,6 @@ import ( "github.com/gadget-inc/dateilager/internal/key" "github.com/gadget-inc/dateilager/internal/logger" "github.com/gadget-inc/dateilager/internal/telemetry" - "github.com/gadget-inc/dateilager/pkg/api" "github.com/gadget-inc/dateilager/pkg/cached" "github.com/gadget-inc/dateilager/pkg/client" "github.com/gadget-inc/dateilager/pkg/version" @@ -31,20 +30,19 @@ func NewCacheDaemonCommand() *cobra.Command { ) var ( - level *zapcore.Level - encoding string - tracing bool - profilePath string - upstreamHost string - upstreamPort uint16 - certFile string - keyFile string - port int - timeout uint - headlessHost string - stagingPath string - downstreamsPath string - downstreamsPathSuffix string + level *zapcore.Level + encoding string + tracing bool + profilePath string + upstreamHost string + upstreamPort uint16 + certFile string + keyFile string + port int + timeout uint + headlessHost string + stagingPath string + downstreamsPath string ) cmd := &cobra.Command{ @@ -107,13 +105,7 @@ func NewCacheDaemonCommand() *cobra.Command { return fmt.Errorf("failed to listen on TCP port %d: %w", port, err) } - paths := &api.CachePaths{ - StagingPath: stagingPath, - DownstreamsRootPath: downstreamsPath, - DownstreamPathSuffix: downstreamsPathSuffix, - } - - s := cached.NewServer(ctx, cl, &cert, paths) + s := cached.NewServer(ctx, cl, &cert, stagingPath) osSignals := make(chan os.Signal, 1) signal.Notify(osSignals, os.Interrupt, syscall.SIGTERM) @@ -159,7 +151,6 @@ func NewCacheDaemonCommand() *cobra.Command { flags.IntVar(&port, "port", 5053, "cache API port") flags.StringVar(&stagingPath, "staging-path", "", "path for staging downloaded caches") flags.StringVar(&downstreamsPath, "downstreams-path", "", "root path of where each downstream's directory sits") - flags.StringVar(&downstreamsPathSuffix, "downstream-path-suffix", "", "path where the cache will be deposited for each downstream consumer") _ = 
cmd.MarkPersistentFlagRequired("staging-path") _ = cmd.MarkPersistentFlagRequired("downstreams-path") diff --git a/pkg/cli/populatediskcache.go b/pkg/cli/populatediskcache.go index cf0baf30..7f456eea 100644 --- a/pkg/cli/populatediskcache.go +++ b/pkg/cli/populatediskcache.go @@ -8,17 +8,14 @@ import ( ) func NewCmdPopulateDiskCache() *cobra.Command { - var ( - downstreamID string - ) - cmd := &cobra.Command{ - Use: "populate-disk-cache", - RunE: func(cmd *cobra.Command, _ []string) error { + Use: "populate-disk-cache ", + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), + RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() c := client.FromContext(ctx) - version, err := c.PopulateDiskCache(ctx, downstreamID) + version, err := c.PopulateDiskCache(ctx, args[0]) if err != nil { return err } @@ -29,9 +26,5 @@ func NewCmdPopulateDiskCache() *cobra.Command { }, } - cmd.Flags().StringVar(&downstreamID, "id", "", "ID of this client for populating the disk cache in the root") - - _ = cmd.MarkFlagRequired("id") - return cmd } diff --git a/pkg/client/client.go b/pkg/client/client.go index 83d06356..7da2d7ef 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -951,19 +951,19 @@ func (c *Client) CloneToProject(ctx context.Context, source int64, target int64, return &response.LatestVersion, nil } -func (c *Client) PopulateDiskCache(ctx context.Context, downstreamID string) (int64, error) { +func (c *Client) PopulateDiskCache(ctx context.Context, destination string) (int64, error) { ctx, span := telemetry.Start(ctx, "client.populate-disk-cache", trace.WithAttributes( - key.DownstreamID.Attribute(downstreamID), + key.CachePath.Attribute(destination), )) defer span.End() request := &pb.PopulateDiskCacheRequest{ - DownstreamId: downstreamID, + Path: destination, } response, err := c.cache.PopulateDiskCache(ctx, request) if err != nil { - return 0, fmt.Errorf("populate disk cache for %s: %w", downstreamID, err) + return 0, fmt.Errorf("populate disk cache for %s: %w", destination, err) } return response.Version, nil diff --git a/test/cached_test.go b/test/cached_test.go index 866d2433..cfda7585 100644 --- a/test/cached_test.go +++ b/test/cached_test.go @@ -10,13 +10,13 @@ import ( "github.com/gadget-inc/dateilager/internal/db" "github.com/gadget-inc/dateilager/internal/pb" util "github.com/gadget-inc/dateilager/internal/testutil" - "github.com/gadget-inc/dateilager/pkg/api" "github.com/gadget-inc/dateilager/pkg/cached" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestPopulateCache(t *testing.T) { - tc := util.NewTestCtx(t, auth.SharedReader, 1) + tc := util.NewTestCtx(t, auth.Admin, 1) defer tc.Close() writeProject(tc, 1, 2) @@ -30,21 +30,15 @@ func TestPopulateCache(t *testing.T) { defer close() tmpDir := emptyTmpDir(t) - // defer os.RemoveAll(tmpDir) - paths := &api.CachePaths{ - StagingPath: path.Join(tmpDir, "staging"), - DownstreamsRootPath: path.Join(tmpDir, "pods"), - DownstreamPathSuffix: "volumes/some-volume/dl_cache", - } - require.NoError(t, os.MkdirAll(path.Join(tmpDir, "pods/test/volumes/some-volume"), 0755), "couldn't make test dir") - - s := cached.NewServer(tc.Context(), c, testCert(), paths) + defer os.RemoveAll(tmpDir) + + s := cached.NewServer(tc.Context(), c, testCert(), path.Join(tmpDir, "staging")) require.NoError(t, s.Cached.Prepare(tc.Context()), "cached.Prepare must succeed") - _, err = s.Cached.PopulateDiskCache(tc.Context(), &pb.PopulateDiskCacheRequest{DownstreamId: "test"}) + _, err = 
s.Cached.PopulateDiskCache(tc.Context(), &pb.PopulateDiskCacheRequest{Path: path.Join(tmpDir, "test")}) require.NoError(t, err, "Cached.PopulateDiskCache") - verifyDir(t, path.Join(tmpDir, "pods/test/volumes/some-volume/dl_cache"), -1, map[string]expectedFile{ + verifyDir(t, path.Join(tmpDir, "test"), -1, map[string]expectedFile{ "objects/c4d899b73f3d61e159d3194f4d96f1c4cd451f6ef5668dbc4ff5c2334407bbcd/pack/a/1": {content: "pack/a/1 v1"}, "objects/c4d899b73f3d61e159d3194f4d96f1c4cd451f6ef5668dbc4ff5c2334407bbcd/pack/a/2": {content: "pack/a/2 v1"}, "objects/8b5e70f3904d1f7ce3374068c137555c8b9c75a3a01d223f89059e84630493ce/pack/b/1": {content: "pack/b/1 v1"}, @@ -53,58 +47,58 @@ func TestPopulateCache(t *testing.T) { }) } -func TestPopulateCacheHostileDownstreamId(t *testing.T) { - tc := util.NewTestCtx(t, auth.SharedReader, 1) +func TestPopulateEmptyCache(t *testing.T) { + tc := util.NewTestCtx(t, auth.Admin, 1) defer tc.Close() + writeProject(tc, 1, 2) + writeObject(tc, 1, 1, i(2), "a", "a v1") + // no packed files, so no cache + version, err := db.CreateCache(tc.Context(), tc.Connect(), "", 100) + require.NoError(t, err) + assert.NotEqual(t, int64(-1), version) + c, _, close := createTestClient(tc) defer close() tmpDir := emptyTmpDir(t) defer os.RemoveAll(tmpDir) - paths := &api.CachePaths{ - StagingPath: path.Join(tmpDir, "staging"), - DownstreamsRootPath: path.Join(tmpDir, "pods"), - DownstreamPathSuffix: "volumes/some-volume/dl_cache", - } - s := cached.NewServer(tc.Context(), c, testCert(), paths) + s := cached.NewServer(tc.Context(), c, testCert(), path.Join(tmpDir, "staging")) require.NoError(t, s.Cached.Prepare(tc.Context()), "cached.Prepare must succeed") - _, err := s.Cached.PopulateDiskCache(tc.Context(), &pb.PopulateDiskCacheRequest{DownstreamId: "../../foo"}) - require.Error(t, err, "invalid pod UID") + _, err = s.Cached.PopulateDiskCache(tc.Context(), &pb.PopulateDiskCacheRequest{Path: path.Join(tmpDir, "test")}) + require.NoError(t, err, "Cached.PopulateDiskCache") + + verifyDir(t, path.Join(tmpDir, "test"), -1, map[string]expectedFile{ + "objects/": {content: "", fileType: typeDirectory}, + }) } -func TestPopulateCacheNonExistingSuffix(t *testing.T) { - tc := util.NewTestCtx(t, auth.SharedReader, 1) +func TestPopulateCacheToPathWithNoWritePermissions(t *testing.T) { + tc := util.NewTestCtx(t, auth.Admin, 1) defer tc.Close() + writeProject(tc, 1, 2) + writeObject(tc, 1, 1, i(2), "a", "a v1") + writePackedFiles(tc, 1, 1, nil, "pack/a") + writePackedFiles(tc, 1, 1, nil, "pack/b") + _, err := db.CreateCache(tc.Context(), tc.Connect(), "", 100) + require.NoError(t, err) + c, _, close := createTestClient(tc) defer close() tmpDir := emptyTmpDir(t) defer os.RemoveAll(tmpDir) - paths := &api.CachePaths{ - StagingPath: path.Join(tmpDir, "staging"), - DownstreamsRootPath: path.Join(tmpDir, "pods"), - DownstreamPathSuffix: "volumes/some-volume/dl_cache", - } - s := cached.NewServer(tc.Context(), c, testCert(), paths) + s := cached.NewServer(tc.Context(), c, testCert(), path.Join(tmpDir, "staging")) require.NoError(t, s.Cached.Prepare(tc.Context()), "cached.Prepare must succeed") - // we try to populate the cache for a downstreaam where segments of the cache path don't exist yet - // the only segment we're willing to create is the very last one - _, err := s.Cached.PopulateDiskCache(tc.Context(), &pb.PopulateDiskCacheRequest{DownstreamId: "foo"}) - require.Error(t, err, "invalid pod UID") - - // make the downstream's root directory and one of the paths on the way to the suffix, but not 
the full suffix - require.NoError(t, os.MkdirAll(path.Join(tmpDir, "pods/foo/volumes"), 0755), "couldn't make test dir") - _, err = s.Cached.PopulateDiskCache(tc.Context(), &pb.PopulateDiskCacheRequest{DownstreamId: "foo"}) - require.Error(t, err, "something") - - // make the second-to-last segment of the path suffix, cached is willing to make the last one - require.NoError(t, os.MkdirAll(path.Join(tmpDir, "pods/foo/volumes/some-volume"), 0755), "couldn't make test dir") - _, err = s.Cached.PopulateDiskCache(tc.Context(), &pb.PopulateDiskCacheRequest{DownstreamId: "foo"}) + // Create a directory with no write permissions + err = os.Mkdir(path.Join(tmpDir, "test"), 0000) require.NoError(t, err) + + _, err = s.Cached.PopulateDiskCache(tc.Context(), &pb.PopulateDiskCacheRequest{Path: path.Join(tmpDir, "test")}) + require.Error(t, err, "populating cache to a path with no write permissions must fail") }
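
Usage sketch (illustrative only, not part of this patch): with the path-based API above, a caller that already holds a *client.Client connected to the cache daemon asks it to hardlink the prepared golden copy into a destination of the caller's choosing. The package name, function name, and destination path below are assumptions for illustration.

// Hypothetical caller-side sketch -- not part of this patch.
package example

import (
	"context"
	"fmt"

	"github.com/gadget-inc/dateilager/pkg/client"
)

// populateExample asks a running cache daemon to hardlink its prepared cache
// into destination; the daemon returns the cache version it placed.
func populateExample(ctx context.Context, cl *client.Client, destination string) error {
	// destination must be a writable directory (or a creatable path); per the
	// daemon's WriteCache checks, an existing plain file or a directory without
	// write permission is rejected.
	version, err := cl.PopulateDiskCache(ctx, destination)
	if err != nil {
		return fmt.Errorf("populate disk cache at %s: %w", destination, err)
	}
	fmt.Printf("cache version %d hardlinked into %s\n", version, destination)
	return nil
}

Because placement is pure hardlinking from the staged golden copy, it is near-instant and adds no per-destination disk usage, which is why the staging path and the destinations must live on the same host filesystem.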