diff --git a/acceptance/openstack/objectstorage/v1/objects_test.go b/acceptance/openstack/objectstorage/v1/objects_test.go new file mode 100644 index 00000000..3ecf5e66 --- /dev/null +++ b/acceptance/openstack/objectstorage/v1/objects_test.go @@ -0,0 +1,505 @@ +// +build acceptance + +package v1 + +import ( + "bytes" + "io/ioutil" + "os" + "path" + "testing" + + "github.com/gophercloud/gophercloud/acceptance/tools" + th "github.com/gophercloud/gophercloud/testhelper" + + "github.com/gophercloud/utils/openstack/clientconfig" + "github.com/gophercloud/utils/openstack/objectstorage/v1/objects" +) + +func TestObjectStreamingUploadDownload(t *testing.T) { + client, err := clientconfig.NewServiceClient("object-store", nil) + th.AssertNoErr(t, err) + + // Create a test container to store the object. + cName, err := CreateContainer(t, client) + th.AssertNoErr(t, err) + defer DeleteContainer(t, client, cName) + + // Generate a random object name and random content. + oName := tools.RandomString("test-object-", 8) + content := tools.RandomString("", 10) + contentBuf := bytes.NewBuffer([]byte(content)) + + // Upload the object + uploadOpts := &objects.UploadOpts{ + Content: contentBuf, + } + uploadResult, err := objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Download the object + downloadOpts := &objects.DownloadOpts{ + OutFile: "-", + } + downloadResults, err := objects.Download(client, cName, []string{oName}, downloadOpts) + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(downloadResults), 1) + th.AssertEquals(t, downloadResults[0].Success, true) + downloadedContent, err := ioutil.ReadAll(downloadResults[0].Content) + th.AssertNoErr(t, err) + th.AssertEquals(t, string(downloadedContent), content) + + tools.PrintResource(t, downloadResults[0]) +} + +func TestObjectFileUploadDownload(t *testing.T) { + client, err := clientconfig.NewServiceClient("object-store", nil) + th.AssertNoErr(t, err) + + // Create a file with random content + source, err := CreateRandomFile(t, "/tmp") + th.AssertNoErr(t, err) + defer DeleteTempFile(t, source) + + // Create a destination file. + dest := tools.RandomString("/tmp/test-dest-", 8) + + // Create a random object name. + oName := tools.RandomString("test-object-", 8) + + // Create a test container to store the object. 
+ cName, err := CreateContainer(t, client) + th.AssertNoErr(t, err) + defer DeleteContainer(t, client, cName) + + // Upload the object + uploadOpts := &objects.UploadOpts{ + Path: source, + } + + uploadResult, err := objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Download the object + downloadOpts := &objects.DownloadOpts{ + OutFile: dest, + } + downloadResults, err := objects.Download(client, cName, []string{oName}, downloadOpts) + th.AssertNoErr(t, err) + defer DeleteTempFile(t, dest) + + th.AssertEquals(t, len(downloadResults), 1) + th.AssertEquals(t, downloadResults[0].Success, true) + + tools.PrintResource(t, downloadResults[0]) + + equals, err := CompareFiles(t, source, dest) + th.AssertNoErr(t, err) + th.AssertEquals(t, equals, true) +} + +func TestObjectStreamingSLO(t *testing.T) { + client, err := clientconfig.NewServiceClient("object-store", nil) + th.AssertNoErr(t, err) + + // Create a test container to store the object. + cName, err := CreateContainer(t, client) + th.AssertNoErr(t, err) + defer DeleteContainer(t, client, cName) + defer DeleteContainer(t, client, cName+"_segments") + + // Generate a random object name and random content. + oName := tools.RandomString("test-object-", 8) + content := tools.RandomString("", 256) + contentBuf := bytes.NewBuffer([]byte(content)) + + // Upload the object + uploadOpts := &objects.UploadOpts{ + Checksum: true, + Content: contentBuf, + SegmentSize: 62, + UseSLO: true, + } + uploadResult, err := objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Download the object + downloadOpts := &objects.DownloadOpts{ + OutFile: "-", + } + downloadResults, err := objects.Download(client, cName, []string{oName}, downloadOpts) + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(downloadResults), 1) + th.AssertEquals(t, downloadResults[0].Success, true) + + tools.PrintResource(t, downloadResults[0]) + + // Compare the downloaded content with the uploaded. + downloadedContent, err := ioutil.ReadAll(downloadResults[0].Content) + th.AssertNoErr(t, err) + th.AssertEquals(t, string(downloadedContent), content) + + // Replace the object with the same object. + contentBuf = bytes.NewBuffer([]byte(content)) + uploadOpts.Content = contentBuf + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Download the object + downloadResults, err = objects.Download(client, cName, []string{oName}, downloadOpts) + th.AssertNoErr(t, err) + th.AssertEquals(t, len(downloadResults), 1) + th.AssertEquals(t, downloadResults[0].Success, true) + + tools.PrintResource(t, downloadResults[0]) + + // Compare the downloaded content with the uploaded. + downloadedContent, err = ioutil.ReadAll(downloadResults[0].Content) + th.AssertNoErr(t, err) + th.AssertEquals(t, string(downloadedContent), content) +} + +func TestObjectFileSLO(t *testing.T) { + client, err := clientconfig.NewServiceClient("object-store", nil) + th.AssertNoErr(t, err) + + // Create a file with random content + source, err := CreateRandomFile(t, "/tmp") + th.AssertNoErr(t, err) + defer DeleteTempFile(t, source) + + // Create a destination file. 
+ dest := tools.RandomString("/tmp/test-dest-", 8) + defer DeleteTempFile(t, dest) + + // Create a random object name. + oName := tools.RandomString("test-object-", 8) + + // Create a test container to store the object. + cName, err := CreateContainer(t, client) + th.AssertNoErr(t, err) + defer DeleteContainer(t, client, cName) + defer DeleteContainer(t, client, cName+"_segments") + + // Upload the object + uploadOpts := &objects.UploadOpts{ + Path: source, + SegmentSize: 62, + UseSLO: true, + } + + uploadResult, err := objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Download the object + downloadOpts := &objects.DownloadOpts{ + OutFile: dest, + } + downloadResults, err := objects.Download(client, cName, []string{oName}, downloadOpts) + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(downloadResults), 1) + th.AssertEquals(t, downloadResults[0].Success, true) + + tools.PrintResource(t, downloadResults[0]) + + equals, err := CompareFiles(t, source, dest) + th.AssertNoErr(t, err) + th.AssertEquals(t, equals, true) + + tools.PrintResource(t, downloadResults[0]) + + // Replace the object with the same object. + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Download the object + downloadResults, err = objects.Download(client, cName, []string{oName}, downloadOpts) + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(downloadResults), 1) + th.AssertEquals(t, downloadResults[0].Success, true) + + tools.PrintResource(t, downloadResults[0]) + + equals, err = CompareFiles(t, source, dest) + th.AssertNoErr(t, err) + th.AssertEquals(t, equals, true) + + tools.PrintResource(t, downloadResults[0]) + + // Replace the object with the same object. + // But skip identical segments + uploadOpts.SkipIdentical = true + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + th.AssertEquals(t, uploadResult.Status, "skip-identical") + + tools.PrintResource(t, uploadResult) + + // Replace the object with the same object. + // But only if changed. + uploadOpts.SkipIdentical = false + uploadOpts.Changed = true + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + th.AssertEquals(t, uploadResult.Status, "skip-changed") + + tools.PrintResource(t, uploadResult) +} + +func TestObjectFileDLO(t *testing.T) { + client, err := clientconfig.NewServiceClient("object-store", nil) + th.AssertNoErr(t, err) + + // Create a file with random content + source, err := CreateRandomFile(t, "/tmp") + th.AssertNoErr(t, err) + defer DeleteTempFile(t, source) + + // Create a destination file. + dest := tools.RandomString("/tmp/test-dest-", 8) + defer DeleteTempFile(t, dest) + + // Create a random object name. + oName := tools.RandomString("test-object-", 8) + + // Create a test container to store the object. 
+ cName, err := CreateContainer(t, client) + th.AssertNoErr(t, err) + defer DeleteContainer(t, client, cName) + defer DeleteContainer(t, client, cName+"_segments") + + // Upload the object + uploadOpts := &objects.UploadOpts{ + Checksum: true, + Path: source, + SegmentSize: 62, + } + + uploadResult, err := objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Download the object + downloadOpts := &objects.DownloadOpts{ + OutFile: dest, + } + downloadResults, err := objects.Download(client, cName, []string{oName}, downloadOpts) + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(downloadResults), 1) + th.AssertEquals(t, downloadResults[0].Success, true) + + tools.PrintResource(t, downloadResults[0]) + + equals, err := CompareFiles(t, source, dest) + th.AssertNoErr(t, err) + th.AssertEquals(t, equals, true) + + // Replace the object with the same object. + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Download the object + downloadResults, err = objects.Download(client, cName, []string{oName}, downloadOpts) + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(downloadResults), 1) + th.AssertEquals(t, downloadResults[0].Success, true) + + tools.PrintResource(t, downloadResults[0]) + + equals, err = CompareFiles(t, source, dest) + th.AssertNoErr(t, err) + th.AssertEquals(t, equals, true) + + tools.PrintResource(t, downloadResults[0]) + + // Replace the object with the same object. + // But skip identical segments + uploadOpts.SkipIdentical = true + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + th.AssertEquals(t, uploadResult.Status, "skip-identical") + + tools.PrintResource(t, uploadResult) + + // Replace the object with the same object. + // But only if changed. + uploadOpts.SkipIdentical = false + uploadOpts.Changed = true + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + th.AssertEquals(t, uploadResult.Status, "skip-changed") + + tools.PrintResource(t, uploadResult) +} + +func TestObjectPseudoDirBasic(t *testing.T) { + client, err := clientconfig.NewServiceClient("object-store", nil) + th.AssertNoErr(t, err) + + // Create a test container to store the object. + cName, err := CreateContainer(t, client) + th.AssertNoErr(t, err) + defer DeleteContainer(t, client, cName) + + // Generate a random object name and random content. + oName := tools.RandomString("test-object-", 8) + + // Create the directory marker + uploadOpts := &objects.UploadOpts{ + DirMarker: true, + } + uploadResult, err := objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Get the object + obj, err := GetObject(client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, obj.ContentType, "application/directory") +} + +func TestObjectPseudoDirFileStructure(t *testing.T) { + client, err := clientconfig.NewServiceClient("object-store", nil) + th.AssertNoErr(t, err) + + // Create a test container to store the object. 
+ cName, err := CreateContainer(t, client) + th.AssertNoErr(t, err) + defer DeleteContainer(t, client, cName) + + // Create a temporary directory to hold files. + parentDir, err := CreateTempDir(t, "/tmp") + th.AssertNoErr(t, err) + defer DeleteTempDir(t, parentDir) + + oName := path.Base(parentDir) + + // Upload the directory + uploadOpts := &objects.UploadOpts{ + Path: parentDir, + } + uploadResult, err := objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Create a file with random content + source, err := CreateRandomFile(t, parentDir) + th.AssertNoErr(t, err) + defer DeleteTempFile(t, source) + + oName = path.Join(oName, path.Base(source)) + + // Upload the file. + uploadOpts.Path = source + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Create a nested directory to hold files. + nestedDir, err := CreateTempDir(t, parentDir) + th.AssertNoErr(t, err) + defer DeleteTempDir(t, nestedDir) + + oName = path.Join(path.Base(parentDir), path.Base(nestedDir)) + + // Upload the nested directory + uploadOpts.Path = nestedDir + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Create a file in the nested directory with random content + nestedSource, err := CreateRandomFile(t, nestedDir) + th.AssertNoErr(t, err) + defer DeleteTempFile(t, nestedSource) + + oName = path.Join(oName, path.Base(nestedSource)) + + // Upload the file. + uploadOpts.Path = nestedSource + uploadResult, err = objects.Upload(client, cName, oName, uploadOpts) + defer DeleteObject(t, client, cName, oName) + th.AssertNoErr(t, err) + th.AssertEquals(t, uploadResult.Success, true) + + tools.PrintResource(t, uploadResult) + + // Create a temporary directory to download files. 
+ downloadDir, err := CreateTempDir(t, "/tmp") + th.AssertNoErr(t, err) + defer DeleteTempDir(t, downloadDir) + + // Download the container to downloadDir + downloadOpts := &objects.DownloadOpts{ + OutDirectory: downloadDir, + } + downloadResults, err := objects.Download(client, cName, []string{}, downloadOpts) + th.AssertNoErr(t, err) + + // Compare the downloaded content + for _, dr := range downloadResults { + pseudoDir := dr.PseudoDir + stat, err := os.Stat(dr.Path) + th.AssertNoErr(t, err) + th.AssertEquals(t, stat.IsDir(), pseudoDir) + + if !pseudoDir { + v := path.Join("/tmp", dr.Object) + equals, err := CompareFiles(t, v, dr.Path) + th.AssertNoErr(t, err) + th.AssertEquals(t, equals, true) + } + } + + tools.PrintResource(t, downloadResults) +} diff --git a/acceptance/openstack/objectstorage/v1/objectstorage.go b/acceptance/openstack/objectstorage/v1/objectstorage.go new file mode 100644 index 00000000..b7777469 --- /dev/null +++ b/acceptance/openstack/objectstorage/v1/objectstorage.go @@ -0,0 +1,152 @@ +package v1 + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" +) + +// CompareFiles will compare two files +func CompareFiles(t *testing.T, file1, file2 string) (bool, error) { + f1, err := os.Open(file1) + if err != nil { + return false, fmt.Errorf("unable to open %s: %s", file1, err) + } + defer f1.Close() + + f2, err := os.Open(file2) + if err != nil { + return false, fmt.Errorf("unable to open %s: %s", file2, err) + } + defer f2.Close() + + contents1, err := ioutil.ReadAll(f1) + if err != nil { + return false, fmt.Errorf("unable to read %s: %s", file1, err) + } + + contents2, err := ioutil.ReadAll(f2) + if err != nil { + return false, fmt.Errorf("unable to read %s: %s", file2, err) + } + + equal := bytes.Equal(contents1, contents2) + + return equal, nil +} + +// CreateContainer will create a container with a random name. +func CreateContainer(t *testing.T, client *gophercloud.ServiceClient) (string, error) { + cName := tools.RandomString("test-container-", 8) + res := containers.Create(client, cName, nil) + + t.Logf("creating container: %s", cName) + + return cName, res.Err +} + +// CreateRandomFile will create a file with random content. +func CreateRandomFile(t *testing.T, parentDir string) (string, error) { + tmpfile, err := CreateTempFile(t, parentDir) + if err != nil { + return "", fmt.Errorf("unable to create random file: %s", err) + } + + content := tools.RandomString("", 256) + tmpfile.Write([]byte(content)) + tmpfile.Close() + + return tmpfile.Name(), nil +} + +// CreateTempDir will create and return a temp directory. +func CreateTempDir(t *testing.T, parentDir string) (string, error) { + dirName, err := ioutil.TempDir(parentDir, "test-dir-") + if err != nil { + return "", err + } + + t.Logf("creating tempdir: %s", dirName) + return dirName, nil +} + +// CreateTempFile will create and return a temp file. +func CreateTempFile(t *testing.T, dir string) (*os.File, error) { + fileName := tools.RandomString("test-file-", 8) + t.Logf("creating tempfile: %s", fileName) + return ioutil.TempFile(dir, fileName) +} + +// DeleteContainer will delete a container. A fatal error will occur if the +// container failed to be deleted. This works best when used as a deferred +// function. 
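+// All objects inside the container are deleted first, because Swift refuses to
+// delete a container that still holds objects.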
+func DeleteContainer(t *testing.T, client *gophercloud.ServiceClient, cName string) { + t.Logf("deleting container %s", cName) + + allPages, err := objects.List(client, cName, nil).AllPages() + if err != nil { + t.Fatalf("unable to list container %s: %s", cName, err) + } + + allObjects, err := objects.ExtractNames(allPages) + if err != nil { + t.Fatalf("unable to extract container %s: %s", cName, err) + } + + for _, oName := range allObjects { + res := objects.Delete(client, cName, oName, nil) + if res.Err != nil { + t.Fatalf("unable to delete object: %s/%s: %s", cName, oName, oName) + } + } + + res := containers.Delete(client, cName) + if res.Err != nil { + t.Fatalf("unable to delete container %s: %s", cName, res.Err) + } +} + +// DeleteObject will delete an object. A fatal error will occur if the object +// failed to be deleted. This works best when used as a deferred function. +func DeleteObject(t *testing.T, client *gophercloud.ServiceClient, cName, oName string) { + t.Logf("deleting object %s/%s", cName, oName) + + res := objects.Delete(client, cName, oName, nil) + if res.Err != nil { + t.Fatalf("unable to delete object %s/%s: %s", cName, oName, res.Err) + } +} + +// DeleteTempFile will delete a temporary file. A fatal error will occur if the +// file could not be deleted. This works best when used as a deferred function. +func DeleteTempFile(t *testing.T, fileName string) { + t.Logf("deleting tempfile %s", fileName) + + if err := os.Remove(fileName); err != nil { + t.Fatalf("unable to delete tempfile %s: %s", fileName, err) + } +} + +// DeleteTempDir will delete a temporary directory. A fatal error will occur if +// the directory could not be deleted. This works best when used as a deferred +// function. +func DeleteTempDir(t *testing.T, dirName string) { + t.Logf("deleting tempdir %s", dirName) + + if err := os.RemoveAll(dirName); err != nil { + t.Fatalf("unable to delete tempdir %s: %s", dirName, err) + } +} + +// GetObject is an alias to objects.GetObject so we don't have to import +// gophercloud/gophercloud into objects_test.go and make things confusing. +func GetObject(client *gophercloud.ServiceClient, cName, oName string) (*objects.GetHeader, error) { + return objects.Get(client, cName, oName, nil).Extract() +} diff --git a/openstack/objectstorage/v1/objects/download.go b/openstack/objectstorage/v1/objects/download.go new file mode 100644 index 00000000..8f8cda01 --- /dev/null +++ b/openstack/objectstorage/v1/objects/download.go @@ -0,0 +1,383 @@ +package objects + +import ( + "fmt" + "io" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + "github.com/gophercloud/gophercloud/pagination" +) + +// DownloadOpts represents options used for downloading an object. +type DownloadOpts struct { + // Delimiter is a delimiter to specify for listing objects. + Delimiter string + + // IgnoreMtime won't update the downloaded file's mtime. + IgnoreMtime bool + + // NoDownload won't actually download the object. + NoDownload bool + + // OutDirectory is a directory to save the objects to. + OutDirectory string + + // OutFile is a file to save the object to. + OutFile string + + // Prefix is a prefix string for a container. + Prefix string + + // RemovePrefix will remove the prefix from the container. 
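+	// For example, with Prefix "photos/" and RemovePrefix set, an object named
+	// "photos/2018/a.jpg" is saved locally as "2018/a.jpg".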
+ RemovePrefix bool + + // SkipIdentical will skip identical objects already downloaded. + SkipIdentical bool + + // YesAll will download everything. + YesAll bool +} + +// Download downloads one or more objects from an Object Storage account. +// It is roughly based on the python-swiftclient implementation: +// +// https://github.com/openstack/python-swiftclient/blob/e65070964c7b1e04119c87e5f344d39358780d18/swiftclient/service.py#L1024 +func Download(client *gophercloud.ServiceClient, containerName string, objectNames []string, opts *DownloadOpts) ([]DownloadResult, error) { + var downloadResults []DownloadResult + + if strings.Contains(containerName, "/") { + return nil, fmt.Errorf("container name %s contains a /", containerName) + } + + if containerName == "" { + if opts.YesAll { + // Download everything + listOpts := containers.ListOpts{ + Full: true, + Delimiter: opts.Delimiter, + Prefix: opts.Prefix, + } + + err := containers.List(client, listOpts).EachPage(func(page pagination.Page) (bool, error) { + containerList, err := containers.ExtractInfo(page) + if err != nil { + return false, fmt.Errorf("error listing containers: %s", err) + } + + for _, c := range containerList { + results, err := downloadContainer(client, c.Name, opts) + if err != nil { + return false, fmt.Errorf("error downloading container %s: %s", c.Name, err) + } + + downloadResults = append(downloadResults, results...) + } + + return true, nil + }) + + if err != nil { + return nil, fmt.Errorf("error downloading container %s: %s", containerName, err) + } + + return downloadResults, nil + } + } + + if len(objectNames) == 0 { + results, err := downloadContainer(client, containerName, opts) + if err != nil { + return nil, fmt.Errorf("error downloading container %s: %s", containerName, err) + } + downloadResults = append(downloadResults, results...) + + return downloadResults, nil + } + + for _, objectName := range objectNames { + result, err := downloadObject(client, containerName, objectName, opts) + if err != nil { + return nil, fmt.Errorf("error downloading object %s/%s: %s", containerName, objectName, err) + } + downloadResults = append(downloadResults, *result) + } + + return downloadResults, nil +} + +// downloadObject will download a specified object. +func downloadObject(client *gophercloud.ServiceClient, containerName string, objectName string, opts *DownloadOpts) (*DownloadResult, error) { + var objectDownloadOpts objects.DownloadOpts + var pseudoDir bool + + // Perform a get on the object in order to get its metadata. + originalObject := objects.Get(client, containerName, objectName, nil) + if originalObject.Err != nil { + return nil, fmt.Errorf("error retrieving object %s/%s: %s", containerName, objectName, originalObject.Err) + } + + originalMetadata, err := originalObject.ExtractMetadata() + if err != nil { + return nil, fmt.Errorf("error extracting object metadata for %s/%s: %s", containerName, objectName, err) + } + + objectPath := objectName + if opts.YesAll { + objectPath = path.Join(containerName, objectName) + } + + // SkipIdentical is not possible when stdout has been specified. 
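+	// (With stdout there is no existing local file whose checksum could be
+	// compared against the remote object, so the check is quietly disabled.)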
+ opts.SkipIdentical = opts.SkipIdentical && opts.OutFile != "-" + + if opts.Prefix != "" && opts.RemovePrefix { + objectPath = string(objectPath[len(opts.Prefix):]) + } + + if opts.OutDirectory != "" { + objectPath = path.Join(opts.OutDirectory, objectName) + } + + filename := objectPath + if opts.OutFile != "" && opts.OutFile != "-" { + filename = opts.OutFile + } + + // SkipIdentical will get the md5sum of the existing local file. + // It'll use it in the If-None-Match header. + if opts.SkipIdentical { + objectDownloadOpts.MultipartManifest = "get" + + md5, err := FileMD5Sum(filename) + if err != nil && !os.IsNotExist(err) { + return nil, fmt.Errorf("error getting md5sum of file %s: %s", filename, err) + } + + if md5 != "" { + objectDownloadOpts.IfNoneMatch = md5 + } + } + + // Attempt to download the object + res := objects.Download(client, containerName, objectName, objectDownloadOpts) + if res.Err != nil { + // Ignore the error if SkipIdentical is set. + // This is because a second attempt to download the object will happen later. + if !opts.SkipIdentical { + return nil, fmt.Errorf("error getting object %s/%s: %s", containerName, objectName, res.Err) + } + } + + headers, err := res.Extract() + if err != nil { + return nil, fmt.Errorf("error extracting headers from %s: %s", objectName, err) + } + + if opts.SkipIdentical { + // Determine if the downloaded object has a manifest or is a Static Large + // Object. + // + // This is a little odd. It should be doing the same thing that + // python-swiftclient is doing, though. + var hasManifest bool + var manifest string + if headers.ObjectManifest != "" { + hasManifest = true + manifest = "" + } + + if headers.StaticLargeObject { + hasManifest = true + manifest = "[]" + } + + if hasManifest { + mo := GetManifestOpts{ + ContainerName: containerName, + ContentLength: headers.ContentLength, + ETag: headers.ETag, + ObjectName: objectName, + ObjectManifest: headers.ObjectManifest, + Manifest: manifest, + StaticLargeObject: headers.StaticLargeObject, + } + + manifestData, err := GetManifest(client, mo) + if err != nil { + return nil, fmt.Errorf("unable to get manifest for %s/%s: %s", containerName, objectName, err) + } + + if len(manifestData) > 0 { + ok, err := IsIdentical(manifestData, filename) + if err != nil { + return nil, fmt.Errorf("error comparing object %s/%s and path %s: %s", containerName, objectName, filename, err) + } + + if ok { + downloadResult := &DownloadResult{ + Action: "download_object", + Container: containerName, + Object: objectName, + Path: objectPath, + PseudoDir: pseudoDir, + Success: true, + } + + return downloadResult, nil + } + + // This is a Large object + objectDownloadOpts.MultipartManifest = "" + res = objects.Download(client, containerName, objectName, objectDownloadOpts) + if res.Err != nil { + return nil, fmt.Errorf("error downloading object %s/%s: %s", containerName, objectName, err) + } + } + } + } + + if opts.OutFile == "-" && !opts.NoDownload { + downloadResult := &DownloadResult{ + Action: "download_object", + Container: containerName, + Content: res.Body, + Object: objectName, + Path: objectPath, + PseudoDir: pseudoDir, + Success: true, + } + + return downloadResult, nil + } + + contentType := GetContentType(headers.ContentType) + var ctMatch bool + for _, kdm := range knownDirMarkers { + if contentType == kdm { + pseudoDir = true + ctMatch = true + } + } + + // I'm not sure if 0777 is appropriate here. + // It looks to be what python-swiftclient and python os.makedirs is doing. 
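+	// In practice the process umask is applied on top of 0777, so the created
+	// directories typically end up as 0755.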
+ if ctMatch && opts.OutFile != "-" && !opts.NoDownload { + if err := os.MkdirAll(objectPath, 0777); err != nil { + return nil, fmt.Errorf("error creating directory %s: %s", objectPath, err) + } + } else { + mkdir := !(opts.NoDownload || opts.OutFile == "") + + if mkdir { + dir := filepath.Dir(objectPath) + if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { + if err := os.MkdirAll(dir, 0777); err != nil { + return nil, fmt.Errorf("error creating directory %s: %s", dir, err) + } + } + } + + var file string + if !opts.NoDownload { + if opts.OutFile != "" { + file = opts.OutFile + } else { + if strings.HasSuffix(objectPath, "/") { + pseudoDir = true + } else { + file = objectPath + } + } + } + + if file != "" { + f, err := os.Create(file) + if err != nil { + return nil, fmt.Errorf("error creating file %s: %s", file, err) + } + defer f.Close() + + buf := make([]byte, diskBuffer) + for { + chunk, err := res.Body.Read(buf) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("error reading object %s/%s: %s", containerName, objectName, err) + } + + if chunk == 0 { + break + } + + if _, err := f.Write(buf[:chunk]); err != nil { + return nil, fmt.Errorf("error writing file %s: %s", file, err) + } + } + f.Close() + } + + if !opts.NoDownload && !opts.IgnoreMtime { + if v, ok := originalMetadata["Mtime"]; ok { + epoch, err := strconv.ParseInt(v, 10, 64) + if err == nil { + epoch = epoch * 1000000000 + mtime := time.Unix(epoch, 0) + if err := os.Chtimes(file, mtime, mtime); err != nil { + return nil, fmt.Errorf("error updating mtime for %s: %s", file, err) + } + } + } + } + } + + downloadResult := &DownloadResult{ + Action: "download_object", + Success: true, + Container: containerName, + Object: objectName, + Path: objectPath, + PseudoDir: pseudoDir, + } + + return downloadResult, nil +} + +// downloadContainer will download all objects in a given container. +func downloadContainer(client *gophercloud.ServiceClient, containerName string, opts *DownloadOpts) ([]DownloadResult, error) { + listOpts := objects.ListOpts{ + Full: true, + Prefix: opts.Prefix, + Delimiter: opts.Delimiter, + } + + var downloadResults []DownloadResult + err := objects.List(client, containerName, listOpts).EachPage(func(page pagination.Page) (bool, error) { + objectList, err := objects.ExtractNames(page) + if err != nil { + return false, fmt.Errorf("error listing container %s: %s", containerName, err) + } + + for _, objectName := range objectList { + result, err := downloadObject(client, containerName, objectName, opts) + if err != nil { + return false, fmt.Errorf("error downloading object %s/%s: %s", containerName, objectName, err) + } + + downloadResults = append(downloadResults, *result) + } + + return true, nil + }) + + if err != nil { + return nil, fmt.Errorf("error downloading container %s: %s", containerName, err) + } + + return downloadResults, nil +} diff --git a/openstack/objectstorage/v1/objects/manifest.go b/openstack/objectstorage/v1/objects/manifest.go new file mode 100644 index 00000000..48087854 --- /dev/null +++ b/openstack/objectstorage/v1/objects/manifest.go @@ -0,0 +1,192 @@ +package objects + +import ( + "bufio" + "crypto/md5" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" +) + +// Manifest represents an object manifest. 
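+// Each Manifest value describes a single segment of a large object, either as
+// returned by a multipart-manifest=get download or as reconstructed from a
+// segment listing.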
+type Manifest struct { + Bytes int64 `json:"bytes"` + ContentType string `json:"content_type"` + Hash string `json:"hash"` + Name string `json:"name"` + LastModified time.Time `json:"-"` +} + +func (r *Manifest) UnmarshalJSON(b []byte) error { + type tmp Manifest + var s struct { + tmp + LastModified gophercloud.JSONRFC3339MilliNoZ `json:"last_modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Manifest(s.tmp) + + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// ExtractMultipartManifest will extract a manifest returned when +// downloading an object using DownloadOpts.MultipartManifest = "get". +func ExtractMultipartManifest(body []byte) ([]Manifest, error) { + var s []Manifest + err := json.Unmarshal(body, &s) + return s, err +} + +type GetManifestOpts struct { + ContainerName string + ContentLength int64 + ETag string + ObjectManifest string + ObjectName string + Manifest string + StaticLargeObject bool +} + +// https://github.com/openstack/python-swiftclient/blob/e65070964c7b1e04119c87e5f344d39358780d18/swiftclient/service.py#L1916 +func GetManifest(client *gophercloud.ServiceClient, opts GetManifestOpts) ([]Manifest, error) { + var manifest []Manifest + + // TODO: test this + if opts.ObjectManifest != "" { + v := strings.SplitN(opts.ObjectManifest, "/", 2) + if len(v) != 2 { + return nil, fmt.Errorf("unable to parse object manifest %s", opts.ObjectManifest) + } + + sContainer := v[0] + sPrefix := v[1] + + listOpts := objects.ListOpts{ + Prefix: sPrefix, + } + + allPages, err := objects.List(client, sContainer, listOpts).AllPages() + if err != nil { + return nil, fmt.Errorf("unable to list %s: %s", sContainer, err) + } + + allObjects, err := objects.ExtractNames(allPages) + if err != nil { + return nil, fmt.Errorf("unable to extract objects from %s: %s", sContainer, err) + } + + for _, obj := range allObjects { + objInfo, err := objects.Get(client, sContainer, obj, nil).Extract() + if err != nil { + return nil, fmt.Errorf("unable to get object %s:%s: %s", sContainer, obj, err) + } + + m := Manifest{ + Bytes: objInfo.ContentLength, + ContentType: objInfo.ContentType, + Hash: objInfo.ETag, + LastModified: objInfo.LastModified, + Name: obj, + } + + manifest = append(manifest, m) + } + + return manifest, nil + } + + if opts.StaticLargeObject { + if opts.Manifest == "" { + downloadOpts := objects.DownloadOpts{ + MultipartManifest: "get", + } + res := objects.Download(client, opts.ContainerName, opts.ObjectName, downloadOpts) + if res.Err != nil { + return nil, res.Err + } + + body, err := res.ExtractContent() + if err != nil { + return nil, err + } + + multipartManifest, err := ExtractMultipartManifest(body) + if err != nil { + return nil, err + } + + for _, obj := range multipartManifest { + // TODO: support sub_slo + m := Manifest{ + Bytes: obj.Bytes, + ContentType: obj.ContentType, + Hash: obj.Hash, + LastModified: obj.LastModified, + Name: obj.Name, + } + manifest = append(manifest, m) + } + } + + return manifest, nil + } + + m := Manifest{ + Hash: opts.ETag, + Bytes: opts.ContentLength, + } + + manifest = append(manifest, m) + + return manifest, nil +} + +func IsIdentical(manifest []Manifest, path string) (bool, error) { + if path == "" { + return false, nil + } + + f, err := os.Open(path) + if err != nil { + return false, err + } + defer f.Close() + + reader := bufio.NewReader(f) + + for _, data := range manifest { + hash := md5.New() + buf := make([]byte, data.Bytes) + n, err := reader.Read(buf) + if err != nil && err != io.EOF 
{ + return false, err + } + + hash.Write(buf[:n]) + checksum := fmt.Sprintf("%x", hash.Sum(nil)) + if checksum != data.Hash { + return false, nil + } + } + + // Do one last read to see if the end of file was reached. + buf := make([]byte, 1) + _, err = reader.Read(buf) + if err == io.EOF { + return true, nil + } + + return false, nil +} diff --git a/openstack/objectstorage/v1/objects/results.go b/openstack/objectstorage/v1/objects/results.go new file mode 100644 index 00000000..2704664c --- /dev/null +++ b/openstack/objectstorage/v1/objects/results.go @@ -0,0 +1,25 @@ +package objects + +import ( + "io" +) + +type DownloadResult struct { + Action string + Container string + Content io.ReadCloser + Object string + Path string + PseudoDir bool + Success bool +} + +type UploadResult struct { + Action string + Container string + LargeObject bool + Object string + Path string + Status string + Success bool +} diff --git a/openstack/objectstorage/v1/objects/testing/fixtures.go b/openstack/objectstorage/v1/objects/testing/fixtures.go new file mode 100644 index 00000000..4475a58a --- /dev/null +++ b/openstack/objectstorage/v1/objects/testing/fixtures.go @@ -0,0 +1,76 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" + "github.com/gophercloud/utils/openstack/objectstorage/v1/objects" +) + +const multipartManifest = ` +[ + { + "bytes": 104857600, + "content_type": "application/swiftclient-segment", + "hash": "a8b539f420dc1f47a6721cec22efeab3", + "last_modified": "2018-04-22T01:34:00.000000", + "name": "/testContainer/testObject/slo/1524360755.633149/289669120/104857600/00000000" + }, + { + "bytes": 104857600, + "content_type": "application/swiftclient-segment", + "hash": "7fd240a4032a676efd518ffa8601cde1", + "last_modified": "2018-04-22T01:34:00.000000", + "name": "/testContainer/testObject/slo/1524360755.633149/289669120/104857600/00000001" + }, + { + "bytes": 79953920, + "content_type": "application/swiftclient-segment", + "hash": "96414e8a758f1ba7107fd03bc5fc4741", + "last_modified": "2018-04-22T01:34:00.000000", + "name": "/testContainer/testObject/slo/1524360755.633149/289669120/104857600/00000002" + } +] +` + +var expectedMultipartManifest = []objects.Manifest{ + { + Bytes: 104857600, + ContentType: "application/swiftclient-segment", + Hash: "a8b539f420dc1f47a6721cec22efeab3", + LastModified: time.Date(2018, 4, 22, 1, 34, 0, 0, time.UTC), + Name: "/testContainer/testObject/slo/1524360755.633149/289669120/104857600/00000000", + }, + { + Bytes: 104857600, + ContentType: "application/swiftclient-segment", + Hash: "7fd240a4032a676efd518ffa8601cde1", + LastModified: time.Date(2018, 4, 22, 1, 34, 0, 0, time.UTC), + Name: "/testContainer/testObject/slo/1524360755.633149/289669120/104857600/00000001", + }, + { + Bytes: 79953920, + ContentType: "application/swiftclient-segment", + Hash: "96414e8a758f1ba7107fd03bc5fc4741", + LastModified: time.Date(2018, 4, 22, 1, 34, 0, 0, time.UTC), + Name: "/testContainer/testObject/slo/1524360755.633149/289669120/104857600/00000002", + }, +} + +// HandleDownloadManifestSuccessfully creates an HTTP handler at +// `/testContainer/testObject` on the test handler mux that responds with a +// Download response. 
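+// The response body is the multipartManifest fixture defined above.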
+func HandleDownloadManifestSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.Header().Set("Date", "Wed, 10 Nov 2009 23:00:00 GMT") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, multipartManifest) + }) +} diff --git a/openstack/objectstorage/v1/objects/testing/manifest_test.go b/openstack/objectstorage/v1/objects/testing/manifest_test.go new file mode 100644 index 00000000..a5fcf7bd --- /dev/null +++ b/openstack/objectstorage/v1/objects/testing/manifest_test.go @@ -0,0 +1,72 @@ +package testing + +import ( + "testing" + + o "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" + "github.com/gophercloud/utils/openstack/objectstorage/v1/objects" +) + +func TestIsIdentical(t *testing.T) { + cd := []objects.Manifest{ + { + Bytes: 2, + Hash: "60b725f10c9c85c70d97880dfe8191b3", + }, + { + Bytes: 2, + Hash: "3b5d5c3712955042212316173ccf37be", + }, + { + Bytes: 2, + Hash: "2cd6ee2c70b0bde53fbe6cac3c8b8bb1", + }, + { + Bytes: 2, + Hash: "e29311f6f1bf1af907f9ef9f44b8328b", + }, + } + + actual, err := objects.IsIdentical(cd, "t.txt") + th.AssertNoErr(t, err) + th.AssertEquals(t, true, actual) +} + +func TestMultipartManifest(t *testing.T) { + actual, err := objects.ExtractMultipartManifest([]byte(multipartManifest)) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expectedMultipartManifest, actual) +} + +func TestChunkData(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDownloadManifestSuccessfully(t) + + downloadOpts := o.DownloadOpts{ + MultipartManifest: "get", + } + + res := o.Download(fake.ServiceClient(), "testContainer", "testObject", downloadOpts) + defer res.Body.Close() + th.AssertNoErr(t, res.Err) + + body, err := res.ExtractContent() + th.AssertNoErr(t, err) + + actualMultipartManifest, err := objects.ExtractMultipartManifest(body) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, actualMultipartManifest, expectedMultipartManifest) + + gmo := objects.GetManifestOpts{ + ContainerName: "testContainer", + ObjectName: "testObject", + StaticLargeObject: true, + } + + actualChunkData, err := objects.GetManifest(fake.ServiceClient(), gmo) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, actualChunkData, expectedMultipartManifest) +} diff --git a/openstack/objectstorage/v1/objects/testing/t.txt b/openstack/objectstorage/v1/objects/testing/t.txt new file mode 100644 index 00000000..d68dd403 --- /dev/null +++ b/openstack/objectstorage/v1/objects/testing/t.txt @@ -0,0 +1,4 @@ +a +b +c +d diff --git a/openstack/objectstorage/v1/objects/testing/utils_test.go b/openstack/objectstorage/v1/objects/testing/utils_test.go new file mode 100644 index 00000000..d57d2d0d --- /dev/null +++ b/openstack/objectstorage/v1/objects/testing/utils_test.go @@ -0,0 +1,27 @@ +package testing + +import ( + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/utils/openstack/objectstorage/v1/objects" +) + +func TestContainerPartition(t *testing.T) { + containerName := "foo/bar/baz" + + expectedContainerName := "foo" + expectedPseudoFolder := "bar/baz" + + actualContainerName, actualPseudoFolder := objects.ContainerPartition(containerName) + th.AssertEquals(t, expectedContainerName, actualContainerName) + th.AssertEquals(t, 
expectedPseudoFolder, actualPseudoFolder) + + containerName = "foo" + expectedContainerName = "foo" + expectedPseudoFolder = "" + + actualContainerName, actualPseudoFolder = objects.ContainerPartition(containerName) + th.AssertEquals(t, expectedContainerName, actualContainerName) + th.AssertEquals(t, expectedPseudoFolder, actualPseudoFolder) +} diff --git a/openstack/objectstorage/v1/objects/upload.go b/openstack/objectstorage/v1/objects/upload.go new file mode 100644 index 00000000..d0711d8b --- /dev/null +++ b/openstack/objectstorage/v1/objects/upload.go @@ -0,0 +1,841 @@ +package objects + +import ( + "bytes" + "crypto/md5" + "encoding/json" + "fmt" + "io" + "net/url" + "os" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" +) + +// UploadOpts represents options used for uploading an object. +type UploadOpts struct { + // Changed will prevent an upload if the mtime and size of the source + // and destination objects are the same. + Changed bool + + // Checksum will enforce a comparison of the md5sum/etag between the + // local and remote object to ensure integrity. + Checksum bool + + // Content is an io.Reader which can be used to upload a object via an + // open file descriptor or any other type of stream. + Content io.Reader + + // DirMarker will create a directory marker. + DirMarker bool + + // LeaveSegments will cause old segments of an object to be left in a + // container. + LeaveSegments bool + + // Metadata is optional metadata to place on the object. + Metadata map[string]string + + // Path is a local filesystem path of an object to be uploaded. + Path string + + // Segment container is a custom container name to store object segments. + // If one is not specified, then "containerName_segments" will be used. + SegmentContainer string + + // SegmentSize is the size of each segment. An object will be split into + // pieces (segments) of this size. + SegmentSize int64 + + // SkipIdentical is a more thorough check than "Changed". It will compare + // the md5sum/etag of the object as a comparison. + SkipIdentical bool + + // StoragePolicy represents a storage policy of where the object should be + // uploaded. + StoragePolicy string + + // UseSLO will have the object uploaded using Static Large Object support. + UseSLO bool +} + +// originalObject is an interal structure used to store information about an +// existing object. +type originalObject struct { + headers *objects.GetHeader + metadata map[string]string +} + +// uploadSegmentOpts is an internal structure used for handling the upload +// of an object's segment. +type uploadSegmentOpts struct { + Checksum bool + ContainerName string + Content io.Reader + Path string + ObjectName string + SegmentContainer string + SegmentName string + SegmentSize int64 + SegmentStart int64 + SegmentIndex int +} + +// uploadSegmentResult is an internal structure that represents the result +// result of a segment upload. +type uploadSegmentResult struct { + Complete bool + ETag string + Index int + Location string + Size int64 + Success bool +} + +// uploadSLOManifestOpts is an internal structure that represents +// options used for creating an SLO manifest. +type uploadSLOManifestOpts struct { + Results []uploadSegmentResult + ContainerName string + ObjectName string + Metadata map[string]string +} + +// sloManifest represents an SLO manifest. 
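+// Each value describes one segment; a slice of them, marshalled to JSON,
+// forms the manifest body that Swift expects for Static Large Objects
+// ([{"path": ..., "etag": ..., "size_bytes": ...}, ...]).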
+type sloManifest struct { + Path string `json:"path"` + ETag string `json:"etag"` + SizeBytes int64 `json:"size_bytes"` +} + +// Upload uploads a single object to swift. +// +// https://github.com/openstack/python-swiftclient/blob/e65070964c7b1e04119c87e5f344d39358780d18/swiftclient/service.py#L1371 +func Upload(client *gophercloud.ServiceClient, containerName, objectName string, opts *UploadOpts) (*UploadResult, error) { + var sourceFileInfo os.FileInfo + origObject := new(originalObject) + + if opts.Path != "" && opts.Content != nil { + return nil, fmt.Errorf("only one of Path and Content can be used") + } + + containerName, pseudoFolder := ContainerPartition(containerName) + if pseudoFolder != "" { + objectName = pseudoFolder + "/" + objectName + } + + if strings.HasPrefix(objectName, `./`) || strings.HasPrefix(objectName, `.\`) { + objectName = string(objectName[:2]) + } + + if strings.HasPrefix(objectName, `/`) { + objectName = string(objectName[:1]) + } + + if len(opts.Metadata) == 0 { + opts.Metadata = make(map[string]string) + } + + // Try to create the container, but ignore any errors. + // TODO: add X-Storage-Policy to Gophercloud. + // If a storage policy was specified, create the container with that policy. + containers.Create(client, containerName, nil) + + // Check and see if the object being requested already exists. + objectResult := objects.Get(client, containerName, objectName, nil) + if objectResult.Err != nil { + if _, ok := objectResult.Err.(gophercloud.ErrDefault404); ok { + origObject = nil + } else { + return nil, fmt.Errorf("error retrieving original object %s/%s: %s", containerName, objectName, objectResult.Err) + } + } + + // If it already exists, stash its headers and metadata for later comparisons. + if origObject != nil { + headers, err := objectResult.Extract() + if err != nil { + return nil, fmt.Errorf("error extracting headers of original object %s/%s: %s", containerName, objectName, err) + } + origObject.headers = headers + + metadata, err := objectResult.ExtractMetadata() + if err != nil { + return nil, fmt.Errorf("error extracting metadata of original object %s/%s: %s", containerName, objectName, err) + } + origObject.metadata = metadata + } + + // Figure out the mtime. + // If a path was specified, then use the file's mtime. + // Otherwise, use the current time. + if opts.Path != "" { + fileInfo, err := os.Stat(opts.Path) + if err != nil { + return nil, fmt.Errorf("error retrieving file stats of %s: %s", opts.Path, err) + } + + // store the file's fileInfo for later reference. + sourceFileInfo = fileInfo + + // Format the file's mtime in the same format used by python-swiftclient. + v := fileInfo.ModTime().UnixNano() + mtime := fmt.Sprintf("%.6f", float64(v)/1000000000) + opts.Metadata["Mtime"] = mtime + } else { + v := time.Now().UnixNano() + mtime := fmt.Sprintf("%.6f", float64(v)/1000000000) + opts.Metadata["Mtime"] = mtime + } + + // If a segment size was specified, then the object will most likely + // be broken up into segments. + if opts.SegmentSize != 0 { + // First determine what the segment container will be called. + if opts.SegmentContainer == "" { + opts.SegmentContainer = containerName + "_segments" + } + + // Then create the segment container. + // TODO: add X-Storage-Policy to Gophercloud. + // Create the segment container in either the specified policy or the same + // policy as the above container. 
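+		// Unlike the earlier container creation, an error here aborts the upload,
+		// since the segments would have nowhere to be stored.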
+ res := containers.Create(client, opts.SegmentContainer, nil) + if res.Err != nil { + return nil, fmt.Errorf("error creating segment container %s: %s", opts.SegmentContainer, res.Err) + } + } + + // If an io.Reader (streaming) was specified... + if opts.Content != nil { + return uploadObject(client, containerName, objectName, opts, origObject, sourceFileInfo) + } + + // If a local path was specified... + if opts.Path != "" { + if sourceFileInfo.IsDir() { + // If the source path is a directory, then create a Directory Marker, + // even if DirMarker wasn't specified. + return createDirMarker(client, containerName, objectName, opts, origObject, sourceFileInfo) + } + + return uploadObject(client, containerName, objectName, opts, origObject, sourceFileInfo) + } + + if opts.DirMarker { + return createDirMarker(client, containerName, objectName, opts, origObject, sourceFileInfo) + } + + // Finally, create an empty object. + opts.Content = strings.NewReader("") + return uploadObject(client, containerName, objectName, opts, origObject, sourceFileInfo) +} + +// createDirMarker will create a pseudo-directory in Swift. +// +// https://github.com/openstack/python-swiftclient/blob/e65070964c7b1e04119c87e5f344d39358780d18/swiftclient/service.py#L1656 +func createDirMarker( + client *gophercloud.ServiceClient, + containerName string, + objectName string, + opts *UploadOpts, + origObject *originalObject, + sourceFileInfo os.FileInfo) (*UploadResult, error) { + + uploadResult := &UploadResult{ + Action: "create_dir_marker", + Container: containerName, + Object: objectName, + } + + if origObject != nil { + if opts.Changed { + contentLength := origObject.headers.ContentLength + eTag := origObject.headers.ETag + + contentType := GetContentType(origObject.headers.ContentType) + + var mtMatch bool + if origMTime, ok := origObject.metadata["Mtime"]; ok { + if newMTime, ok := opts.Metadata["Mtime"]; ok { + if origMTime == newMTime { + mtMatch = true + } + } + } + + var ctMatch bool + for _, kdm := range knownDirMarkers { + if contentType == kdm { + ctMatch = true + } + } + + if ctMatch && mtMatch && contentLength == 0 && eTag == emptyETag { + uploadResult.Success = true + return uploadResult, nil + } + } + } + + createOpts := objects.CreateOpts{ + Content: strings.NewReader(""), + ContentLength: 0, + ContentType: "application/directory", + Metadata: opts.Metadata, + } + + res := objects.Create(client, containerName, objectName, createOpts) + if res.Err != nil { + return uploadResult, res.Err + } + + uploadResult.Success = true + return uploadResult, nil +} + +// uploadObject handles uploading an object to Swift. +// This includes support for SLO, DLO, and standard uploads +// from both streaming sources and local file paths. +// +// https://github.com/openstack/python-swiftclient/blob/e65070964c7b1e04119c87e5f344d39358780d18/swiftclient/service.py#L2006 +func uploadObject( + client *gophercloud.ServiceClient, + containerName string, + objectName string, + opts *UploadOpts, + origObject *originalObject, + sourceFileInfo os.FileInfo) (*UploadResult, error) { + uploadResult := &UploadResult{ + Action: "upload_action", + Container: containerName, + Object: objectName, + } + + // manifestData contains information about existing objects. + var manifestData []Manifest + + // oldObjectManifest is the existing object's manifest. + var oldObjectManifest string + + // oldSLOManifestPaths is a list of the old object segment's manifest paths. 
+ var oldSLOManifestPaths []string + + // newSLOManifestPaths is a list of the new object segment's manifest paths. + var newSLOManifestPaths []string + + if origObject != nil { + origHeaders := origObject.headers + origMetadata := origObject.metadata + isSLO := origHeaders.StaticLargeObject + + if opts.Changed || opts.SkipIdentical || !opts.LeaveSegments { + var err error + + // If the below conditionals are met, get the manifest data of + // the existing object. + if opts.SkipIdentical || (isSLO && !opts.LeaveSegments) { + mo := GetManifestOpts{ + ContainerName: containerName, + ContentLength: origHeaders.ContentLength, + ETag: origHeaders.ETag, + ObjectManifest: origHeaders.ObjectManifest, + ObjectName: objectName, + StaticLargeObject: origHeaders.StaticLargeObject, + } + + manifestData, err = GetManifest(client, mo) + if err != nil { + return nil, fmt.Errorf("unable to get manifest for %s/%s: %s", containerName, objectName, err) + } + } + + // If SkipIdentical is enabled, compare the md5sum/etag of each + // piece of the manifest to determine if the objects are the same. + if opts.SkipIdentical { + ok, err := IsIdentical(manifestData, opts.Path) + if err != nil { + return nil, fmt.Errorf("error comparing object %s/%s and path %s: %s", containerName, objectName, opts.Path, err) + } + + if ok { + uploadResult.Status = "skip-identical" + uploadResult.Success = true + return uploadResult, nil + } + } + } + + // If the source object is a local file and Changed is enabled, + // compare the mtime and content length to determine if the objects + // are the same. + if opts.Path != "" && opts.Changed { + var mtMatch bool + if v, ok := origMetadata["Mtime"]; ok { + if v == opts.Metadata["Mtime"] { + mtMatch = true + } + } + + var fSizeMatch bool + if origHeaders.ContentLength == sourceFileInfo.Size() { + fSizeMatch = true + } + + if mtMatch && fSizeMatch { + uploadResult.Status = "skip-changed" + uploadResult.Success = true + return uploadResult, nil + } + } + + // If LeaveSegments is set to false (default), keep + // track of the paths of the original object's segments + // so they can be deleted later. 
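+		// Both a DLO's X-Object-Manifest prefix and an SLO's individual segment
+		// paths are collected here so the stale segments can be removed once the
+		// new upload has finished.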
+ if !opts.LeaveSegments { + oldObjectManifest = origHeaders.ObjectManifest + + if isSLO { + for _, data := range manifestData { + segPath := strings.TrimSuffix(data.Name, "/") + segPath = strings.TrimPrefix(segPath, "/") + oldSLOManifestPaths = append(oldSLOManifestPaths, segPath) + } + } + } + } + + // Segment upload + if opts.Path != "" && opts.SegmentSize > 0 && (sourceFileInfo.Size() > opts.SegmentSize) { + var uploadSegmentResults []uploadSegmentResult + uploadResult.LargeObject = true + + var segStart int64 + var segIndex int + fSize := sourceFileInfo.Size() + segSize := opts.SegmentSize + + for segStart < fSize { + var segName string + + if segStart+segSize > fSize { + segSize = fSize - segStart + } + + if opts.UseSLO { + segName = fmt.Sprintf("%s/slo/%s/%d/%d/%08d", + objectName, opts.Metadata["Mtime"], fSize, opts.SegmentSize, segIndex) + } else { + segName = fmt.Sprintf("%s/%s/%d/%d/%08d", + objectName, opts.Metadata["Mtime"], fSize, opts.SegmentSize, segIndex) + } + + uso := &uploadSegmentOpts{ + Checksum: opts.Checksum, + Path: opts.Path, + ObjectName: objectName, + SegmentContainer: opts.SegmentContainer, + SegmentIndex: segIndex, + SegmentName: segName, + SegmentSize: segSize, + SegmentStart: segStart, + } + + result, err := uploadSegment(client, uso) + if err != nil { + return nil, err + } + + uploadSegmentResults = append(uploadSegmentResults, *result) + + segIndex += 1 + segStart += segSize + } + + if opts.UseSLO { + uploadOpts := &uploadSLOManifestOpts{ + Results: uploadSegmentResults, + ContainerName: containerName, + ObjectName: objectName, + Metadata: opts.Metadata, + } + + err := uploadSLOManifest(client, uploadOpts) + if err != nil { + return nil, err + } + + for _, result := range uploadSegmentResults { + segPath := strings.TrimSuffix(result.Location, "/") + segPath = strings.TrimPrefix(segPath, "/") + newSLOManifestPaths = append(newSLOManifestPaths, segPath) + } + } else { + newObjectManifest := fmt.Sprintf("%s/%s/%s/%d/%d/", + url.QueryEscape(opts.SegmentContainer), url.QueryEscape(objectName), + opts.Metadata["Mtime"], fSize, opts.SegmentSize) + + if oldObjectManifest != "" { + if strings.TrimSuffix(oldObjectManifest, "/") == strings.TrimSuffix(newObjectManifest, "/") { + oldObjectManifest = "" + } + } + + createOpts := objects.CreateOpts{ + Content: strings.NewReader(""), + ContentLength: 0, + Metadata: opts.Metadata, + ObjectManifest: newObjectManifest, + } + + res := objects.Create(client, containerName, objectName, createOpts) + if res.Err != nil { + return nil, res.Err + } + } + } else if opts.UseSLO && opts.SegmentSize > 0 && opts.Path == "" { + // Streaming segment upload + var segIndex int + var uploadSegmentResults []uploadSegmentResult + + for { + segName := fmt.Sprintf("%s/slo/%s/%d/%08d", + objectName, opts.Metadata["Mtime"], opts.SegmentSize, segIndex) + + // Checksum is not passed here because it's always done during streaming. 
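+			// Zero-length trailing segments are discarded and the loop ends once a
+			// segment reports Complete.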
+ uso := &uploadSegmentOpts{ + Content: opts.Content, + ContainerName: containerName, + ObjectName: objectName, + SegmentContainer: opts.SegmentContainer, + SegmentIndex: segIndex, + SegmentName: segName, + SegmentSize: opts.SegmentSize, + } + + uploadSegmentResult, err := uploadStreamingSegment(client, uso) + if err != nil { + return nil, fmt.Errorf("error uploading segment %d of %s/%s: %s", segIndex, containerName, objectName, err) + } + + if !uploadSegmentResult.Success { + return nil, fmt.Errorf("Problem uploading segment %d of %s/%s", segIndex, containerName, objectName) + } + + if uploadSegmentResult.Size != 0 { + uploadSegmentResults = append(uploadSegmentResults, *uploadSegmentResult) + } + + if uploadSegmentResult.Complete { + break + } + + segIndex += 1 + } + + if len(uploadSegmentResults) > 0 { + if uploadSegmentResults[0].Location != fmt.Sprintf("/%s/%s", containerName, objectName) { + uploadOpts := &uploadSLOManifestOpts{ + Results: uploadSegmentResults, + ContainerName: containerName, + ObjectName: objectName, + Metadata: opts.Metadata, + } + + err := uploadSLOManifest(client, uploadOpts) + if err != nil { + return nil, fmt.Errorf("error uploading SLO manifest for %s/%s: %s", containerName, objectName, err) + } + + for _, result := range uploadSegmentResults { + newSLOManifestPaths = append(newSLOManifestPaths, result.Location) + } + } else { + uploadResult.LargeObject = false + } + } + } else { + var reader io.Reader + var contentLength int64 + uploadResult.LargeObject = false + + if opts.Path != "" { + f, err := os.Open(opts.Path) + if err != nil { + return nil, err + } + defer f.Close() + + reader = f + contentLength = sourceFileInfo.Size() + } else { + reader = opts.Content + } + + var eTag string + if opts.Checksum { + hash := md5.New() + buf := bytes.NewBuffer([]byte{}) + _, err := io.Copy(io.MultiWriter(hash, buf), reader) + if err != nil && err != io.EOF { + return nil, err + } + + eTag = fmt.Sprintf("%x", hash.Sum(nil)) + reader = bytes.NewReader(buf.Bytes()) + } + + var noETag bool + if !opts.Checksum { + noETag = true + } + + createOpts := objects.CreateOpts{ + Content: reader, + ContentLength: contentLength, + Metadata: opts.Metadata, + ETag: eTag, + NoETag: noETag, + } + + createHeader, err := objects.Create(client, containerName, objectName, createOpts).Extract() + if err != nil { + return nil, err + } + + if opts.Checksum { + if createHeader.ETag != eTag { + err := fmt.Errorf("upload verification failed: md5 mismatch, local %s != remote %s", eTag, createHeader.ETag) + return nil, err + } + } + } + + if oldObjectManifest != "" || len(oldSLOManifestPaths) > 0 { + delObjectMap := make(map[string][]string) + if oldObjectManifest != "" { + var oldObjects []string + + parts := strings.SplitN(oldObjectManifest, "/", 2) + sContainer := parts[0] + sPrefix := parts[1] + + sPrefix = strings.TrimRight(sPrefix, "/") + "/" + + listOpts := objects.ListOpts{ + Prefix: sPrefix, + } + allPages, err := objects.List(client, sContainer, listOpts).AllPages() + if err != nil { + return nil, err + } + + allObjects, err := objects.ExtractNames(allPages) + if err != nil { + return nil, err + } + + for _, o := range allObjects { + oldObjects = append(oldObjects, o) + } + + delObjectMap[sContainer] = oldObjects + } + + if len(oldSLOManifestPaths) > 0 { + for _, segToDelete := range oldSLOManifestPaths { + var oldObjects []string + + var exists bool + for _, newSeg := range newSLOManifestPaths { + if segToDelete == newSeg { + exists = true + } + } + + // Only delete the old segment if it's 
not part of the new segment. + if !exists { + parts := strings.SplitN(segToDelete, "/", 2) + sContainer := parts[0] + sObject := parts[1] + + if _, ok := delObjectMap[sContainer]; ok { + oldObjects = delObjectMap[sContainer] + } + + oldObjects = append(oldObjects, sObject) + delObjectMap[sContainer] = oldObjects + } + } + } + + for sContainer, oldObjects := range delObjectMap { + for _, oldObject := range oldObjects { + res := objects.Delete(client, sContainer, oldObject, nil) + if res.Err != nil { + return nil, res.Err + } + } + } + } + + uploadResult.Status = "uploaded" + uploadResult.Success = true + return uploadResult, nil +} + +// https://github.com/openstack/python-swiftclient/blob/e65070964c7b1e04119c87e5f344d39358780d18/swiftclient/service.py#L1966 +func uploadSLOManifest(client *gophercloud.ServiceClient, opts *uploadSLOManifestOpts) error { + var manifest []sloManifest + for _, result := range opts.Results { + m := sloManifest{ + Path: result.Location, + ETag: result.ETag, + SizeBytes: result.Size, + } + + manifest = append(manifest, m) + } + + b, err := json.Marshal(manifest) + if err != nil { + return err + } + + createOpts := objects.CreateOpts{ + Content: strings.NewReader(string(b)), + ContentType: "application/json", + Metadata: opts.Metadata, + MultipartManifest: "put", + NoETag: true, + } + + res := objects.Create(client, opts.ContainerName, opts.ObjectName, createOpts) + if res.Err != nil { + return res.Err + } + + return nil +} + +// https://github.com/openstack/python-swiftclient/blob/e65070964c7b1e04119c87e5f344d39358780d18/swiftclient/service.py#L1719 +func uploadSegment(client *gophercloud.ServiceClient, opts *uploadSegmentOpts) (*uploadSegmentResult, error) { + f, err := os.Open(opts.Path) + if err != nil { + return nil, err + } + defer f.Close() + + _, err = f.Seek(opts.SegmentStart, 0) + if err != nil { + return nil, err + } + + buf := make([]byte, opts.SegmentSize) + n, err := f.Read(buf) + if err != nil && err != io.EOF { + return nil, err + } + + var eTag string + if opts.Checksum { + hash := md5.New() + hash.Write(buf) + eTag = fmt.Sprintf("%x", hash.Sum(nil)) + } + + var noETag bool + if !opts.Checksum { + noETag = true + } + + createOpts := objects.CreateOpts{ + ContentLength: int64(n), + ContentType: "application/swiftclient-segment", + Content: bytes.NewReader(buf), + ETag: eTag, + NoETag: noETag, + } + + createHeader, err := objects.Create(client, opts.SegmentContainer, opts.SegmentName, createOpts).Extract() + if err != nil { + return nil, err + } + + if opts.Checksum { + if createHeader.ETag != eTag { + err := fmt.Errorf("Segment %d: upload verification failed: md5 mismatch, local %s != remote %s", opts.SegmentIndex, eTag, createHeader.ETag) + return nil, err + } + } + + result := &uploadSegmentResult{ + ETag: createHeader.ETag, + Index: opts.SegmentIndex, + Location: fmt.Sprintf("/%s/%s", opts.SegmentContainer, opts.SegmentName), + Size: opts.SegmentSize, + } + + return result, nil +} + +// uploadStreamingSegment will upload an object segment from a streaming source. +// +// https://github.com/openstack/python-swiftclient/blob/e65070964c7b1e04119c87e5f344d39358780d18/swiftclient/service.py#L1846 +func uploadStreamingSegment(client *gophercloud.ServiceClient, opts *uploadSegmentOpts) (*uploadSegmentResult, error) { + var result uploadSegmentResult + + // Checksum is always done when streaming. 
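+ // Each call reads at most SegmentSize bytes from the stream into an
+ // in-memory buffer while computing its MD5 sum, so memory use per
+ // segment is bounded by the configured segment size.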
+ hash := md5.New() + buf := bytes.NewBuffer([]byte{}) + n, err := io.CopyN(io.MultiWriter(hash, buf), opts.Content, opts.SegmentSize) + if err != nil && err != io.EOF { + return nil, err + } + + localChecksum := fmt.Sprintf("%x", hash.Sum(nil)) + + if n == 0 { + result.Complete = true + result.Success = true + result.Size = 0 + + return &result, nil + } + + createOpts := objects.CreateOpts{ + Content: bytes.NewReader(buf.Bytes()), + ContentLength: n, + ETag: localChecksum, + // TODO + //Metadata: opts.Metadata, + } + + if opts.SegmentIndex == 0 && n < opts.SegmentSize { + res := objects.Create(client, opts.ContainerName, opts.ObjectName, createOpts) + if res.Err != nil { + return nil, res.Err + } + + result.Location = fmt.Sprintf("/%s/%s", opts.ContainerName, opts.ObjectName) + } else { + res := objects.Create(client, opts.SegmentContainer, opts.SegmentName, createOpts) + if res.Err != nil { + return nil, res.Err + } + + result.Location = fmt.Sprintf("/%s/%s", opts.SegmentContainer, opts.SegmentName) + } + + result.Success = true + result.Complete = n < opts.SegmentSize + result.Size = n + result.Index = opts.SegmentIndex + result.ETag = localChecksum + + return &result, nil +} diff --git a/openstack/objectstorage/v1/objects/utils.go b/openstack/objectstorage/v1/objects/utils.go new file mode 100644 index 00000000..73e76044 --- /dev/null +++ b/openstack/objectstorage/v1/objects/utils.go @@ -0,0 +1,69 @@ +package objects + +import ( + "bufio" + "crypto/md5" + "fmt" + "io" + "os" + "strings" +) + +const ( + emptyETag = "d41d8cd98f00b204e9800998ecf8427e" + diskBuffer = 65536 +) + +var ( + knownDirMarkers = []string{ + "application/directory", + "text/directory", + } +) + +func ContainerPartition(containerName string) (string, string) { + var pseudoFolder string + + parts := strings.SplitN(containerName, "/", 2) + if len(parts) == 2 { + containerName = parts[0] + pseudoFolder = strings.TrimSuffix(parts[1], "/") + } + + return containerName, pseudoFolder +} + +// https://github.com/holys/checksum/blob/master/md5/md5.go +func FileMD5Sum(filename string) (string, error) { + if _, err := os.Stat(filename); err != nil { + return "", err + } + + file, err := os.Open(filename) + if err != nil { + return "", err + } + defer file.Close() + + hash := md5.New() + + for buf, reader := make([]byte, diskBuffer), bufio.NewReader(file); ; { + n, err := reader.Read(buf) + if err != nil { + if err == io.EOF { + break + } + + return "", err + } + + hash.Write(buf[:n]) + } + + return fmt.Sprintf("%x", hash.Sum(nil)), nil +} + +func GetContentType(ct string) string { + v := strings.SplitN(ct, ";", 2) + return v[0] +}
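As a minimal usage sketch (not part of the diff above), the exported helpers added in openstack/objectstorage/v1/objects/utils.go can be exercised on their own. This assumes they are imported from github.com/gophercloud/utils/openstack/objectstorage/v1/objects; the temporary-file setup and printed values are illustrative only.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/gophercloud/utils/openstack/objectstorage/v1/objects"
)

func main() {
	// ContainerPartition splits a "container/pseudo-folder" style name into
	// its container and pseudo-folder components.
	container, pseudoFolder := objects.ContainerPartition("my-container/photos/2023/")
	fmt.Println(container, pseudoFolder) // my-container photos/2023

	// FileMD5Sum returns the hex-encoded MD5 checksum of a local file,
	// matching the ETag Swift reports for a non-segmented object.
	f, err := ioutil.TempFile("", "md5-example-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString("hello world"); err != nil {
		log.Fatal(err)
	}
	f.Close()

	sum, err := objects.FileMD5Sum(f.Name())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum) // 5eb63bbbe01eeed093cb22bb8f5acdc3

	// GetContentType strips any parameters from a Content-Type header value.
	fmt.Println(objects.GetContentType("text/plain; charset=utf-8")) // text/plain
}

Note that FileMD5Sum streams the file through the hash in 64 KiB chunks (diskBuffer), so it is usable on files too large to hold in memory.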