diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml index d86ea3a..c928046 100644 --- a/.github/workflows/e2e-test.yml +++ b/.github/workflows/e2e-test.yml @@ -30,11 +30,6 @@ jobs: echo AWS_S3_BUCKET='test-bucket' >> .env echo S3API_SERVICE_PORT='5005' >> .env echo AUTH_LEVEL=0 >> .env - echo POSTGRES_CONN_STRING='postgres://user:password@postgres:5432/db?sslmode=disable' >> .env - echo POSTGRES_PASSWORD='password' >> .env - echo POSTGRES_USER='user' >> .env - echo POSTGRES_DB='db' >> .env - echo PG_LOG_CHECKPOINTS='off' >> .env - name: Substitute secret variables in JSON env: diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index 6e16342..bcaf571 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -21,6 +21,7 @@ type S3Controller struct { Sess *session.Session S3Svc *s3.S3 Buckets []string + S3Mock bool } // Config holds the configuration settings for the REST API server. @@ -92,13 +93,12 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { } // Configure the BlobHandler with MinIO session and bucket information - config.S3Controllers = []S3Controller{{Sess: sess, S3Svc: s3SVC, Buckets: []string{creds.Bucket}}} + config.S3Controllers = []S3Controller{{Sess: sess, S3Svc: s3SVC, Buckets: []string{creds.Bucket}, S3Mock: true}} // Return the configured BlobHandler return &config, nil } // Using AWS S3 - // Load AWS credentials from the provided .env.json file log.Debug("looking for .env.json") awsConfig, err := newAWSConfig(envJson) @@ -153,7 +153,7 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { } if len(bucketNames) > 0 { - config.S3Controllers = append(config.S3Controllers, S3Controller{Sess: sess, S3Svc: s3SVC, Buckets: bucketNames}) + config.S3Controllers = append(config.S3Controllers, S3Controller{Sess: sess, S3Svc: s3SVC, Buckets: bucketNames, S3Mock: false}) } } diff --git a/blobstore/buckets.go b/blobstore/buckets.go index b2d12c1..45eac21 100644 
--- a/blobstore/buckets.go +++ b/blobstore/buckets.go @@ -112,9 +112,7 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { } } bh.Mu.Unlock() - - log.Info("HandleListBuckets: Successfully retrieved list of buckets") - + log.Info("Successfully retrieved list of buckets") return c.JSON(http.StatusOK, allBuckets) } @@ -122,7 +120,7 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { // bucketName := c.QueryParam("name") // if bucketName == "" { -// err := errors.New("request must include a `name` parameter") +// err := fmt.Errorf("request must include a `name` parameter") // log.Info("HandleCreateBucket: " + err.Error()) // return c.JSON(http.StatusBadRequest, err.Error()) // } @@ -157,7 +155,7 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { // bucketName := c.QueryParam("name") // if bucketName == "" { -// err := errors.New("request must include a `name` parameter") +// err := fmt.Errorf("request must include a `name` parameter") // log.Info("HandleDeleteBucket: " + err.Error()) // return c.JSON(http.StatusBadRequest, err.Error()) // } @@ -177,7 +175,7 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { // bucketName := c.QueryParam("name") // if bucketName == "" { -// err := errors.New("request must include a `name` parameter") +// err := fmt.Errorf("request must include a `name` parameter") // log.Info("HandleGetBucketACL: " + err.Error()) // return c.JSON(http.StatusBadRequest, err.Error()) // } diff --git a/blobstore/delete.go b/blobstore/delete.go index 97ab3e7..47fafe1 100644 --- a/blobstore/delete.go +++ b/blobstore/delete.go @@ -1,7 +1,6 @@ package blobstore import ( - "errors" "fmt" "net/http" "strings" @@ -12,39 +11,45 @@ import ( log "github.com/sirupsen/logrus" ) -func (s3Ctrl *S3Controller) RecursivelyDeleteObjects(bucket, prefix string) error { - prefixPath := strings.Trim(prefix, "/") + "/" - query := &s3.ListObjectsV2Input{ - Bucket: aws.String(bucket), - Prefix: 
aws.String(prefixPath), +func (s3Ctrl *S3Controller) DeleteList(page *s3.ListObjectsV2Output, bucket string) error { + if len(page.Contents) == 0 { + return nil // No objects to delete in this page } - resp, err := s3Ctrl.S3Svc.ListObjectsV2(query) + + var objectsToDelete []*s3.ObjectIdentifier + for _, obj := range page.Contents { + objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{Key: obj.Key}) + } + + // Perform the delete operation for the current page + _, err := s3Ctrl.S3Svc.DeleteObjects(&s3.DeleteObjectsInput{ + Bucket: aws.String(bucket), + Delete: &s3.Delete{ + Objects: objectsToDelete, + Quiet: aws.Bool(true), + }, + }) if err != nil { - return fmt.Errorf("recursivelyDeleteObjects: error listing objects: %s", err) + return fmt.Errorf("error deleting objects: %v", err) } - if len(resp.Contents) > 0 { - var objectsToDelete []*s3.ObjectIdentifier - for _, obj := range resp.Contents { - objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ - Key: obj.Key, - }) - } + return nil +} - if len(objectsToDelete) > 0 { - _, err = s3Ctrl.S3Svc.DeleteObjects(&s3.DeleteObjectsInput{ - Bucket: aws.String(bucket), - Delete: &s3.Delete{ - Objects: objectsToDelete, - }, - }) - - if err != nil { - return fmt.Errorf("recursivelyDeleteObjects: error Deleting objects %v: %s", objectsToDelete, err) - } +func (s3Ctrl *S3Controller) RecursivelyDeleteObjects(bucket, prefix string) error { + var objectsFound bool + err := s3Ctrl.GetListWithCallBack(bucket, prefix, false, func(page *s3.ListObjectsV2Output) error { + if len(page.Contents) > 0 { + objectsFound = true } - } else { - return fmt.Errorf("recursivelyDeleteObjects: object %s not found and no objects were deleted", prefixPath) + return s3Ctrl.DeleteList(page, bucket) + }) + if err != nil { + return fmt.Errorf("error processing objects for deletion: %v", err) + } + + if !objectsFound { + return fmt.Errorf("prefix not found") } return nil } @@ -56,28 +61,29 @@ func (bh *BlobHandler) HandleDeleteObject(c 
echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("parameter `bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } key := c.QueryParam("key") if key == "" { - err := errors.New("parameter 'key' is required") - log.Errorf("HandleDeleteObjects: %s", err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("parameter `key` is required") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } // If the key is not a folder, proceed with deleting a single object keyExist, err := s3Ctrl.KeyExists(bucket, key) if err != nil { - log.Errorf("HandleDeleteObjects: Error checking if key exists: %s", err.Error()) - return c.JSON(http.StatusInternalServerError, err) + errMsg := fmt.Errorf("error checking if object exists: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } if !keyExist { - err := fmt.Errorf("object %s not found", key) - log.Errorf("HandleDeleteObjects: %s", err.Error()) - return c.JSON(http.StatusNotFound, err.Error()) + errMsg := fmt.Errorf("object %s not found", key) + log.Error(errMsg.Error()) + return c.JSON(http.StatusNotFound, errMsg.Error()) } deleteInput := &s3.DeleteObjectInput{ @@ -87,12 +93,12 @@ func (bh *BlobHandler) HandleDeleteObject(c echo.Context) error { _, err = s3Ctrl.S3Svc.DeleteObject(deleteInput) if err != nil { - msg := fmt.Sprintf("error deleting object. %s", err.Error()) - log.Errorf("HandleDeleteObjects: %s", err.Error()) - return c.JSON(http.StatusInternalServerError, msg) + errMsg := fmt.Errorf("error deleting object. 
%s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - log.Info("HandleDeleteObjects: Successfully deleted file with key:", key) + log.Infof("successfully deleted file with key: %s", key) return c.JSON(http.StatusOK, fmt.Sprintf("Successfully deleted object: %s", key)) } @@ -100,41 +106,32 @@ func (bh *BlobHandler) HandleDeletePrefix(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("parameter `bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - prefix := c.QueryParam("prefix") if prefix == "" { - err = errors.New("parameter 'prefix' is required") - log.Errorf("HandleDeleteObjects: %s", err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("parameter `prefix` is required") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } if !strings.HasSuffix(prefix, "/") { prefix = prefix + "/" } - response, err := s3Ctrl.GetList(bucket, prefix, false) - if err != nil { - log.Errorf("HandleDeleteObjects: Error getting list: %s", err.Error()) - return c.JSON(http.StatusInternalServerError, err) - } - if *response.KeyCount == 0 { - err := fmt.Errorf("the specified prefix %s does not exist in S3", prefix) - log.Errorf("HandleDeleteObjects: %s", err.Error()) - return c.JSON(http.StatusNotFound, err.Error()) - } - // This will recursively delete all objects with the specified prefix err = s3Ctrl.RecursivelyDeleteObjects(bucket, prefix) if err != nil { - msg := fmt.Sprintf("error deleting objects. 
%s", err.Error()) - log.Errorf("HandleDeleteObjects: %s", msg) - return c.JSON(http.StatusInternalServerError, msg) + if strings.Contains(err.Error(), "prefix not found") { + errMsg := fmt.Errorf("no objects found with prefix: %s", prefix) + log.Error(errMsg.Error()) + return c.JSON(http.StatusNotFound, errMsg.Error()) + } + errMsg := fmt.Errorf("error deleting objects: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - - log.Info("HandleDeleteObjects: Successfully deleted prefix and its contents for prefix:", prefix) + log.Info("Successfully deleted prefix and its contents for prefix:", prefix) return c.JSON(http.StatusOK, "Successfully deleted prefix and its contents") - } func (s3Ctrl *S3Controller) DeleteKeys(bucket string, key []string) error { @@ -157,7 +154,7 @@ func (s3Ctrl *S3Controller) DeleteKeys(bucket string, key []string) error { _, err := s3Ctrl.S3Svc.DeleteObjects(input) if err != nil { - return fmt.Errorf("deleteKeys: error Deleting objects: %s", err.Error()) + return fmt.Errorf("error deleting objects: %s", err.Error()) } return nil } @@ -169,21 +166,22 @@ func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { } var deleteRequest DeleteRequest if err := c.Bind(&deleteRequest); err != nil { - log.Errorf("HandleDeleteObjectsByList: Error parsing request body: %s" + err.Error()) - return c.JSON(http.StatusBadRequest, "Invalid request body") + errMsg := fmt.Errorf("error parsing request body: %s" + err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusBadRequest, errMsg.Error()) } // Ensure there are keys to delete if len(deleteRequest.Keys) == 0 { - errMsg := "No keys to delete. Please provide 'keys' in the request body." - log.Errorf("HandleDeleteObjectsByList: %s", errMsg) - return c.JSON(http.StatusUnprocessableEntity, errMsg) + errMsg := fmt.Errorf("no keys to delete. 
Please provide 'keys' in the request body") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -197,14 +195,14 @@ func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { // Check if the key exists before appending it to the keys list keyExists, err := s3Ctrl.KeyExists(bucket, s3Path) if err != nil { - msg := fmt.Errorf("error checking if key exists. %s", err.Error()) - log.Errorf("HandleDeleteObjectsByList: %s", msg) - return c.JSON(http.StatusInternalServerError, msg) + errMsg := fmt.Errorf("error checking if object exists. %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg) } if !keyExists { - errMsg := fmt.Sprintf("object %s not found", s3Path) - log.Errorf("HandleDeleteObjectsByList: %s", errMsg) - return c.JSON(http.StatusNotFound, errMsg) + errMsg := fmt.Errorf("object %s not found", s3Path) + log.Error(errMsg.Error()) + return c.JSON(http.StatusNotFound, errMsg.Error()) } keys = append(keys, *key) @@ -213,11 +211,11 @@ func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { // Delete the objects using the deleteKeys function err = s3Ctrl.DeleteKeys(bucket, keys) if err != nil { - msg := fmt.Sprintf("error deleting objects. %s", err.Error()) - log.Errorf("HandleDeleteObjectsByList: %s", msg) - return c.JSON(http.StatusInternalServerError, msg) + errMsg := fmt.Errorf("error deleting objects. 
%s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg) } - log.Info("HandleDeleteObjectsByList: Successfully deleted objects:", deleteRequest.Keys) + log.Info("Successfully deleted objects:", deleteRequest.Keys) return c.JSON(http.StatusOK, "Successfully deleted objects") } diff --git a/blobstore/list.go b/blobstore/list.go index d9688b1..b8d96c1 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -1,7 +1,6 @@ package blobstore import ( - "errors" "fmt" "net/http" "path/filepath" @@ -28,19 +27,45 @@ type ListResult struct { ModifiedBy string `json:"modified_by"` } +// CheckAndAdjustPrefix checks if the prefix is an object and adjusts the prefix accordingly. +// Returns the adjusted prefix, an error message (if any), and the HTTP status code. +func CheckAndAdjustPrefix(s3Ctrl *S3Controller, bucket, prefix string) (string, string, int) { + //As of 6/12/24, unsure why ./ is included here, may be needed for an edge case, but could also cause problems + if prefix != "" && prefix != "./" && prefix != "/" { + isObject, err := s3Ctrl.KeyExists(bucket, prefix) + if err != nil { + return "", fmt.Sprintf("error checking if object exists: %s", err.Error()), http.StatusInternalServerError + } + if isObject { + objMeta, err := s3Ctrl.GetMetaData(bucket, prefix) + if err != nil { + return "", fmt.Sprintf("error checking for object's metadata: %s", err.Error()), http.StatusInternalServerError + } + //this is because AWS considers empty prefixes with a .keep as an object, so we ignore and log + if *objMeta.ContentLength == 0 { + log.Infof("detected a zero byte directory marker within prefix: %s", prefix) + } else { + return "", fmt.Sprintf("`%s` is an object, not a prefix. please see options for keys or pass a prefix", prefix), http.StatusTeapot + } + } + prefix = strings.Trim(prefix, "/") + "/" + } + return prefix, "", http.StatusOK +} + // HandleListByPrefix handles the API endpoint for listing objects by prefix in S3 bucket. 
func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { prefix := c.QueryParam("prefix") if prefix == "" { - err := errors.New("request must include a `prefix` parameter") - log.Error("HandleListByPrefix: " + err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("request must include a `prefix` parameter") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -48,59 +73,46 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { delimiterParam := c.QueryParam("delimiter") var delimiter bool if delimiterParam == "true" || delimiterParam == "false" { - var err error delimiter, err = strconv.ParseBool(delimiterParam) if err != nil { - log.Error("HandleListByPrefix: Error parsing `delimiter` param:", err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("error parsing `delimiter` param: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } } else { - err := errors.New("request must include a `delimiter`, options are `true` or `false`") - log.Error("HandleListByPrefix: " + err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("request must include a `delimiter`, options are `true` or `false`") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - if delimiter { - if !strings.HasSuffix(prefix, "/") { - prefix = prefix + "/" - } + if delimiter && !strings.HasSuffix(prefix, "/") { + prefix = prefix + "/" } - isObject, err := s3Ctrl.KeyExists(bucket, 
prefix) - if err != nil { - log.Error("HandleListByPrefix: can't find bucket or object " + err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + adjustedPrefix, errMsg, statusCode := CheckAndAdjustPrefix(s3Ctrl, bucket, prefix) + if errMsg != "" { + log.Error(errMsg) + return c.JSON(statusCode, errMsg) } + prefix = adjustedPrefix - if isObject { - objMeta, err := s3Ctrl.GetMetaData(bucket, prefix) - if err != nil { - log.Error("HandleListByPrefix: " + err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) - } - if *objMeta.ContentLength == 0 { - log.Infof("HandleListByPrefix: Detected a zero byte directory marker within prefix: %s", prefix) - } else { - err = fmt.Errorf("`%s` is an object, not a prefix. please see options for keys or pass a prefix", prefix) - log.Error("HandleListByPrefix: " + err.Error()) - return c.JSON(http.StatusTeapot, err.Error()) + var objectKeys []string + processPage := func(page *s3.ListObjectsV2Output) error { + for _, object := range page.Contents { + objectKeys = append(objectKeys, aws.StringValue(object.Key)) } + return nil } - listOutput, err := s3Ctrl.GetList(bucket, prefix, delimiter) + err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { - log.Error("HandleListByPrefix: Error getting list:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) - } - - // Convert the list of object keys to strings - var objectKeys []string - for _, object := range listOutput.Contents { - objectKeys = append(objectKeys, aws.StringValue(object.Key)) + errMsg := fmt.Errorf("error processing objects: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - log.Info("HandleListByPrefix: Successfully retrieved list by prefix:", prefix) + log.Info("Successfully retrieved list by prefix:", prefix) return c.JSON(http.StatusOK, objectKeys) } @@ -111,55 +123,25 @@ func (bh *BlobHandler) 
HandleListByPrefixWithDetail(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - if prefix != "" && prefix != "./" && prefix != "/" { - isObject, err := s3Ctrl.KeyExists(bucket, prefix) - if err != nil { - log.Error("HandleListByPrefixWithDetail: " + err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) - } - if isObject { - objMeta, err := s3Ctrl.GetMetaData(bucket, prefix) - if err != nil { - log.Error("HandleListByPrefixWithDetail: " + err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) - } - if *objMeta.ContentLength == 0 { - log.Infof("HandleListByPrefixWithDetail: Detected a zero byte directory marker within prefix: %s", prefix) - } else { - err = fmt.Errorf("`%s` is an object, not a prefix. 
please see options for keys or pass a prefix", prefix) - log.Error("HandleListByPrefixWithDetail: " + err.Error()) - return c.JSON(http.StatusTeapot, err.Error()) - } - } - prefix = strings.Trim(prefix, "/") + "/" - } - - query := &s3.ListObjectsV2Input{ - Bucket: aws.String(bucket), - Prefix: aws.String(prefix), - Delimiter: aws.String("/"), - MaxKeys: aws.Int64(1000), + adjustedPrefix, errMsg, statusCode := CheckAndAdjustPrefix(s3Ctrl, bucket, prefix) + if errMsg != "" { + log.Error(errMsg) + return c.JSON(statusCode, errMsg) } + prefix = adjustedPrefix - result := []ListResult{} - truncatedListing := true + var results []ListResult var count int - for truncatedListing { - resp, err := s3Ctrl.S3Svc.ListObjectsV2(query) - if err != nil { - log.Error("HandleListByPrefixWithDetail: error retrieving list with the following query ", err) - errMsg := fmt.Errorf("HandleListByPrefixWithDetail: error retrieving list, %s", err.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - for _, cp := range resp.CommonPrefixes { - w := ListResult{ + processPage := func(page *s3.ListObjectsV2Output) error { + for _, cp := range page.CommonPrefixes { + // Handle directories (common prefixes) + dir := ListResult{ ID: count, Name: filepath.Base(*cp.Prefix), Size: "", @@ -168,37 +150,37 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { IsDir: true, ModifiedBy: "", } + results = append(results, dir) count++ - result = append(result, w) } - for _, object := range resp.Contents { - parts := strings.Split(filepath.Dir(*object.Key), "/") - isSelf := filepath.Base(*object.Key) == parts[len(parts)-1] - - if !isSelf { - w := ListResult{ - ID: count, - Name: filepath.Base(*object.Key), - Size: strconv.FormatInt(*object.Size, 10), - Path: filepath.Dir(*object.Key), - Type: filepath.Ext(*object.Key), - IsDir: false, - Modified: *object.LastModified, - ModifiedBy: "", - } - - count++ - result = append(result, w) + for _, object := range 
page.Contents { + // Handle files + file := ListResult{ + ID: count, + Name: filepath.Base(*object.Key), + Size: strconv.FormatInt(*object.Size, 10), + Path: filepath.Dir(*object.Key), + Type: filepath.Ext(*object.Key), + IsDir: false, + Modified: *object.LastModified, + ModifiedBy: "", } + results = append(results, file) + count++ } + return nil + } - query.ContinuationToken = resp.NextContinuationToken - truncatedListing = *resp.IsTruncated + err = s3Ctrl.GetListWithCallBack(bucket, prefix, true, processPage) + if err != nil { + errMsg := fmt.Errorf("error processing objects: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - log.Info("HandleListByPrefix: Successfully retrieved list by prefix with detail:", prefix) - return c.JSON(http.StatusOK, result) + log.Info("Successfully retrieved detailed list by prefix:", prefix) + return c.JSON(http.StatusOK, results) } // GetList retrieves a list of objects in the specified S3 bucket with the given prefix. @@ -239,3 +221,30 @@ func (s3Ctrl *S3Controller) GetList(bucket, prefix string, delimiter bool) (*s3. return response, nil } + +// GetListWithCallBack is the same as GetList, except instead of returning the entire list at once, it gives you the option of processing page by page +// this method is safer than GetList as it avoid memory overload for large datasets since it does not store the entire list in memory but rather processes it on the go. 
+func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter bool, processPage func(*s3.ListObjectsV2Output) error) error { + input := &s3.ListObjectsV2Input{ + Bucket: aws.String(bucket), + Prefix: aws.String(prefix), + MaxKeys: aws.Int64(1000), // Adjust the MaxKeys as needed + } + + if delimiter { + input.SetDelimiter("/") + } + + var lastError error // Variable to capture the last error + + // Iterate over the pages of results + err := s3Ctrl.S3Svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, _ bool) bool { + lastError = processPage(page) + return lastError == nil && *page.IsTruncated // Continue if no error and more pages are available + }) + + if lastError != nil { + return lastError // Return the last error encountered in the processPage function + } + return err // Return any errors encountered in the pagination process +} diff --git a/blobstore/metadata.go b/blobstore/metadata.go index 03c0f78..c0ae9a5 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -1,7 +1,6 @@ package blobstore import ( - "errors" "fmt" "net/http" @@ -12,38 +11,35 @@ import ( log "github.com/sirupsen/logrus" ) -func (bh *BlobHandler) GetSize(list *s3.ListObjectsV2Output) (uint64, uint32, error) { - if list == nil { - return 0, 0, errors.New("getSize: input list is nil") +func (bh *BlobHandler) GetSize(page *s3.ListObjectsV2Output, totalSize *uint64, fileCount *uint64) error { + if page == nil { + return fmt.Errorf("input page is nil") } - var size uint64 = 0 - fileCount := uint32(len(list.Contents)) - - for _, file := range list.Contents { + for _, file := range page.Contents { if file.Size == nil { - return 0, 0, errors.New("getSize: file size is nil") + return fmt.Errorf("file size is nil") } - size += uint64(*file.Size) + *totalSize += uint64(*file.Size) + *fileCount++ } - return size, fileCount, nil + return nil } -// HandleGetSize retrieves the total size and the number of files in the specified S3 bucket with the given prefix. 
// HandleGetSize retrieves the total size and the number of files in the specified S3 bucket with the given prefix. func (bh *BlobHandler) HandleGetSize(c echo.Context) error { prefix := c.QueryParam("prefix") if prefix == "" { - err := errors.New("request must include a `prefix` parameter") - log.Error("HandleGetSize: " + err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("request must include a `prefix` parameter") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -51,57 +47,59 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { // Check if the prefix points directly to an object isObject, err := s3Ctrl.KeyExists(bucket, prefix) if err != nil { - log.Error("HandleGetSize: Error checking if prefix is an object:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error checking if prefix is an object: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } if isObject { - // Prefix points directly to an object instead of a collection of objects - return c.JSON(http.StatusTeapot, "The provided prefix points to a single object rather than a collection") - } - list, err := s3Ctrl.GetList(bucket, prefix, false) - if err != nil { - log.Error("HandleGetSize: Error getting list:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("the provided prefix %s points to a single object rather than a collection", prefix) + log.Error(errMsg.Error()) + return c.JSON(http.StatusTeapot, errMsg.Error()) 
} - if len(list.Contents) == 0 { - // No objects found with the provided prefix - return c.JSON(http.StatusNotFound, "Prefix not found") - } + var totalSize uint64 + var fileCount uint64 + err = s3Ctrl.GetListWithCallBack(bucket, prefix, false, func(page *s3.ListObjectsV2Output) error { + return bh.GetSize(page, &totalSize, &fileCount) + }) - size, fileCount, err := bh.GetSize(list) if err != nil { - log.Error("HandleGetSize: Error getting size:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error processing objects: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if fileCount == 0 { + errMsg := fmt.Errorf("prefix %s not found", prefix) + log.Error(errMsg.Error()) + return c.JSON(http.StatusNotFound, errMsg.Error()) } - response := struct { Size uint64 `json:"size"` - FileCount uint32 `json:"file_count"` + FileCount uint64 `json:"file_count"` Prefix string `json:"prefix"` }{ - Size: size, + Size: totalSize, FileCount: fileCount, Prefix: prefix, } - log.Info("HandleGetSize: Successfully retrieved size for prefix:", prefix) + log.Info("Successfully retrieved size for prefix:", prefix) return c.JSON(http.StatusOK, response) } func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { key := c.QueryParam("key") if key == "" { - err := errors.New("request must include a `key` parameter") - log.Error("HandleGetMetaData: " + err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("request must include a `key` parameter") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return 
c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -109,40 +107,42 @@ func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { result, err := s3Ctrl.GetMetaData(bucket, key) if err != nil { if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { - err := fmt.Errorf("object %s not found", key) - log.Error("HandleGetMetaData: " + err.Error()) - return c.JSON(http.StatusNotFound, err.Error()) + errMsg := fmt.Errorf("object %s not found", key) + log.Error(errMsg.Error()) + return c.JSON(http.StatusNotFound, errMsg.Error()) } - log.Error("HandleGetMetaData: Error getting metadata:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error getting metadata: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - log.Info("HandleGetMetaData: Successfully retrieved metadata for key:", key) + log.Info("successfully retrieved metadata for key:", key) return c.JSON(http.StatusOK, result) } func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { key := c.QueryParam("key") if key == "" { - err := errors.New("request must include a `key` parameter") - log.Error("HandleGetObjExist: " + err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("request must include a `key` parameter") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } result, err := s3Ctrl.KeyExists(bucket, key) if err != nil { - log.Error("HandleGetObjExist: " + err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := 
fmt.Errorf("error checking if object exists: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - log.Info("HandleGetObjExist: Successfully retrieved metadata for key:", key) + log.Info("successfully retrieved metadata for key:", key) return c.JSON(http.StatusOK, result) } diff --git a/blobstore/move.go b/blobstore/move.go index b84e798..07b0b49 100644 --- a/blobstore/move.go +++ b/blobstore/move.go @@ -1,7 +1,6 @@ package blobstore import ( - "errors" "fmt" "net/http" "strings" @@ -16,9 +15,9 @@ func (bh *BlobHandler) HandleMovePrefix(c echo.Context) error { srcPrefix := c.QueryParam("src_prefix") destPrefix := c.QueryParam("dest_prefix") if srcPrefix == "" || destPrefix == "" { - err := errors.New("parameters `src_key` and `dest_key` are required") - log.Error("HandleCopyPrefix", err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("parameters `src_key` and `dest_key` are required") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } if !strings.HasSuffix(srcPrefix, "/") { srcPrefix = srcPrefix + "/" @@ -30,15 +29,16 @@ func (bh *BlobHandler) HandleMovePrefix(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - - err = s3Ctrl.CopyPrefix(bucket, srcPrefix, destPrefix) + err = s3Ctrl.MovePrefix(bucket, srcPrefix, destPrefix) if err != nil { - if strings.Contains(err.Error(), "does not exist") { - return c.JSON(http.StatusNotFound, err.Error()) + if strings.Contains(err.Error(), "source prefix not found") { + errMsg := fmt.Errorf("no objects found with source prefix: %s", srcPrefix) + log.Error(errMsg.Error()) + 
return c.JSON(http.StatusNotFound, errMsg.Error()) } return c.JSON(http.StatusInternalServerError, err.Error()) } @@ -46,28 +46,50 @@ func (bh *BlobHandler) HandleMovePrefix(c echo.Context) error { return c.JSON(http.StatusOK, fmt.Sprintf("Successfully moved prefix from %s to %s", srcPrefix, destPrefix)) } -func (s3Ctrl *S3Controller) CopyPrefix(bucket, srcPrefix, destPrefix string) error { - // List objects within the source prefix - listOutput, err := s3Ctrl.GetList(bucket, srcPrefix, true) - if err != nil { - return errors.New("error listing objects with prefix " + srcPrefix + " in bucket " + bucket + ", " + err.Error()) - } +func (s3Ctrl *S3Controller) MovePrefix(bucket, srcPrefix, destPrefix string) error { + var objectsFound bool + + processPage := func(page *s3.ListObjectsV2Output) error { + if len(page.Contents) == 0 { + return nil // No objects to process in this page + } + objectsFound = true // Objects found, set the flag + + for _, object := range page.Contents { + srcObjectKey := aws.StringValue(object.Key) + destObjectKey := strings.Replace(srcObjectKey, srcPrefix, destPrefix, 1) + + // Copy the object to the new location + copyInput := &s3.CopyObjectInput{ + Bucket: aws.String(bucket), + CopySource: aws.String(bucket + "/" + srcObjectKey), + Key: aws.String(destObjectKey), + } + _, err := s3Ctrl.S3Svc.CopyObject(copyInput) + if err != nil { + return fmt.Errorf("error copying object %s to %s: %v", srcObjectKey, destObjectKey, err) + } + } - if len(listOutput.Contents) == 0 { - return errors.New("source prefix " + srcPrefix + " does not exist") + // Deleting the source objects should be handled carefully + // Ensure that your application logic requires this before proceeding + err := s3Ctrl.DeleteList(page, bucket) + if err != nil { + return fmt.Errorf("error deleting from source prefix %s: %v", srcPrefix, err) + } + return nil } - // Copy each object to the destination prefix - for _, object := range listOutput.Contents { - srcObjectKey := 
aws.StringValue(object.Key) - destObjectKey := strings.Replace(srcObjectKey, srcPrefix, destPrefix, 1) + err := s3Ctrl.GetListWithCallBack(bucket, srcPrefix, false, processPage) + if err != nil { + return fmt.Errorf("error processing objects for move: %v", err) + } - copyErr := s3Ctrl.CopyObject(bucket, srcObjectKey, destObjectKey) - if copyErr != nil { - // If an error occurs during copying, return immediately - return copyErr - } + // Check if objects were found after processing all pages + if !objectsFound { + return fmt.Errorf("source prefix not found") } + return nil } @@ -75,15 +97,15 @@ func (bh *BlobHandler) HandleMoveObject(c echo.Context) error { srcObjectKey := c.QueryParam("src_key") destObjectKey := c.QueryParam("dest_key") if srcObjectKey == "" || destObjectKey == "" { - err := errors.New("paramters `src_key` and `dest_key` are required") - log.Error("HandleCopyObject", err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("parameters `src_key` and `dest_key` are required") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -91,14 +113,18 @@ func (bh *BlobHandler) HandleMoveObject(c echo.Context) error { err = s3Ctrl.CopyObject(bucket, srcObjectKey, destObjectKey) if err != nil { if strings.Contains(err.Error(), "keys are identical; no action taken") { + log.Error(err.Error()) return c.JSON(http.StatusBadRequest, err.Error()) // 400 Bad Request } else if strings.Contains(err.Error(), "already exists in the bucket; duplication will cause an overwrite") { + log.Error(err.Error()) return c.JSON(http.StatusConflict, err.Error()) // 409 
Conflict } else if strings.Contains(err.Error(), "does not exist") { + log.Error(err.Error()) return c.JSON(http.StatusNotFound, err.Error()) } - log.Error("HandleCopyObject: Error when implementing copyObject", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error when copying object: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } return c.JSON(http.StatusOK, fmt.Sprintf("Succesfully moved object from %s to %s", srcObjectKey, destObjectKey)) @@ -115,7 +141,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin return fmt.Errorf("error checking if object %s exists: %s", destObjectKey, err.Error()) } if !oldKeyExists { - return errors.New("`srcObjectKey` " + srcObjectKey + " does not exist") + return fmt.Errorf("`srcObjectKey` " + srcObjectKey + " does not exist") } // Check if the new key already exists in the bucket newKeyExists, err := s3Ctrl.KeyExists(bucket, destObjectKey) @@ -123,7 +149,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin return fmt.Errorf("error checking if object %s exists: %s", destObjectKey, err.Error()) } if newKeyExists { - return errors.New(destObjectKey + " already exists in the bucket; duplication will cause an overwrite. Please rename dest_key to a different name") + return fmt.Errorf(destObjectKey + " already exists in the bucket; duplication will cause an overwrite. 
Please rename dest_key to a different name") } // Set up input parameters for the CopyObject API to rename the object copyInput := &s3.CopyObjectInput{ @@ -135,7 +161,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin // Copy the object to the new key (effectively renaming) _, err = s3Ctrl.S3Svc.CopyObject(copyInput) if err != nil { - return errors.New("error copying object" + srcObjectKey + "with the new key" + destObjectKey + ", " + err.Error()) + return fmt.Errorf("error copying object " + srcObjectKey + " with the new key " + destObjectKey + ", " + err.Error()) } // Delete the source object @@ -144,7 +170,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin Key: aws.String(srcObjectKey), }) if err != nil { - return errors.New("error deleting old object " + srcObjectKey + " in bucket " + bucket + ", " + err.Error()) + return fmt.Errorf("error deleting old object " + srcObjectKey + " in bucket " + bucket + ", " + err.Error()) } return nil diff --git a/blobstore/object_content.go b/blobstore/object_content.go index 8a7e073..505156b 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -1,7 +1,6 @@ package blobstore import ( - "errors" "fmt" "io" "net/http" @@ -36,26 +35,27 @@ func (s3Ctrl *S3Controller) FetchObjectContent(bucket string, key string) (io.Re func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { key := c.QueryParam("key") if key == "" { - err := errors.New("parameter 'key' is required") - log.Error("HandleObjectContents: " + err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("parameter 'key' is required") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not 
available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } outPutBody, err := s3Ctrl.FetchObjectContent(bucket, key) if err != nil { - log.Error("HandleObjectContents: " + err.Error()) - if strings.Contains(err.Error(), "object") { - return c.JSON(http.StatusNotFound, err.Error()) + errMsg := fmt.Errorf("error fetching object's content: %s", err.Error()) + log.Error(errMsg.Error()) + if strings.Contains(err.Error(), "not found") { + return c.JSON(http.StatusNotFound, errMsg.Error()) } else { - return c.JSON(http.StatusInternalServerError, err.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } } body, err := io.ReadAll(outPutBody) diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index df1721d..a4f7c55 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -4,7 +4,6 @@ import ( "archive/tar" "bytes" "compress/gzip" - "errors" "fmt" "io" "net/http" @@ -118,52 +117,54 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } key := c.QueryParam("key") if key == "" { - err := errors.New("parameter `key` is required") - log.Error("HandleGetPresignedURL: " + err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("parameter `key` is required") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } keyExist, err := s3Ctrl.KeyExists(bucket, key) if err != nil { - log.Error("HandleGetPresignedURL: Error checking if key exists:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := 
fmt.Errorf("checking if object exists: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } if !keyExist { - err := fmt.Errorf("object %s not found", key) - log.Error("HandleGetPresignedURL: " + err.Error()) - return c.JSON(http.StatusNotFound, err.Error()) + errMsg := fmt.Errorf("object %s not found", key) + log.Error(errMsg.Error()) + return c.JSON(http.StatusNotFound, errMsg.Error()) } // Set the expiration time for the pre-signed URL url, err := s3Ctrl.GetDownloadPresignedURL(bucket, key, bh.Config.DefaultDownloadPresignedUrlExpiration) if err != nil { - log.Error("HandleGetPresignedURL: Error getting presigned URL:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error getting presigned URL: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - log.Info("HandleGetPresignedURL: Successfully generated presigned URL for key:", key) + log.Info("successfully generated presigned URL for key:", key) return c.JSON(http.StatusOK, url) } func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { prefix := c.QueryParam("prefix") if prefix == "" { - err := errors.New("request must include a `prefix` parameter") - log.Error("HandleGetPresignedURLMultiObj: " + err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("request must include a `prefix` parameter") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -174,16 +175,18 @@ func (bh *BlobHandler) 
HandleGetPresignedURLMultiObj(c echo.Context) error { response, err := s3Ctrl.GetList(bucket, prefix, false) if err != nil { - log.Error("HandleGetPresignedURLMultiObj: Error getting list:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error getting list: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } if *response.KeyCount == 0 { errMsg := fmt.Errorf("the specified prefix %s does not exist in S3", prefix) - log.Error("HandleGetPresignedURLMultiObj: " + errMsg.Error()) + log.Error(errMsg.Error()) return c.JSON(http.StatusNotFound, errMsg.Error()) } //check if size is below 5GB - size, _, err := bh.GetSize(response) + var size, fileCount uint64 + err = bh.GetSize(response, &size, &fileCount) if err != nil { errMsg := fmt.Errorf("error getting size: %s", err.Error()) log.Error(errMsg.Error()) @@ -192,9 +195,9 @@ func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { limit := uint64(1024 * 1024 * 1024 * bh.Config.DefaultZipDownloadSizeLimit) if size >= limit { - err := fmt.Errorf("request entity is larger than %v GB, current prefix size is: %v GB", bh.Config.DefaultZipDownloadSizeLimit, float64(size)/(1024*1024*1024)) - log.Error("HandleGetPresignedURLMultiObj: ", err.Error()) - return c.JSON(http.StatusRequestEntityTooLarge, err.Error()) + errMsg := fmt.Errorf("request entity is larger than %v GB, current prefix size is: %v GB", bh.Config.DefaultZipDownloadSizeLimit, float64(size)/(1024*1024*1024)) + log.Error(errMsg.Error()) + return c.JSON(http.StatusRequestEntityTooLarge, errMsg.Error()) } filename := fmt.Sprintf("%s.%s", strings.TrimSuffix(prefix, "/"), "tar.gz") @@ -203,8 +206,9 @@ func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { // Check if the tar.gz file already exists in S3 tarFileResponse, err := s3Ctrl.GetList(bucket, outputFile, false) if err != nil { - log.Error("Error checking if tar.gz 
file exists in S3:", err) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error checking if tar.gz file exists in S3: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } if len(tarFileResponse.Contents) > 0 { @@ -212,8 +216,9 @@ func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { // Tar.gz file exists, now compare modification dates mostRecentModTime, err := s3Ctrl.getMostRecentModTime(bucket, prefix) if err != nil { - log.Error("Error getting most recent modification time:", err) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error getting most recent modification time: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } if tarFileResponse.Contents[0].LastModified.After(mostRecentModTime) { @@ -222,8 +227,9 @@ func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { // Existing tar.gz file is up-to-date, return pre-signed URL href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) if err != nil { - log.Error("Error getting presigned:", err) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error getting presigned: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } return c.JSON(http.StatusOK, string(href)) } @@ -232,31 +238,30 @@ func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { err = s3Ctrl.tarS3Files(response, bucket, outputFile, prefix) if err != nil { - log.Error("HandleGetPresignedURLMultiObj: Error tarring S3 files:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error tarring S3 files: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } href, err := 
s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) if err != nil { - log.Error("HandleGetPresignedURLMultiObj: Error getting presigned URL:", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error getting presigned URL: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - log.Info("HandleGetPresignedURLMultiObj: Successfully generated presigned URL for prefix:", prefix) + log.Info("successfully generated presigned URL for prefix:", prefix) return c.JSON(http.StatusOK, string(href)) } func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { prefix := c.QueryParam("prefix") - bucket := c.QueryParam("bucket") - if prefix == "" || bucket == "" { - errMsg := fmt.Errorf("`prefix` and `bucket` query params are required") + if prefix == "" { + errMsg := fmt.Errorf("`prefix` query params are required") log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - if !strings.HasSuffix(prefix, "/") { - prefix += "/" - } + bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { errMsg := fmt.Errorf("error getting controller for bucket %s: %s", bucket, err) @@ -264,37 +269,10 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - //list objects within the prefix and test if empty - response, err := s3Ctrl.GetList(bucket, prefix, false) - if err != nil { - errMsg := fmt.Errorf("error listing objects in bucket %s with prefix %s: %s", bucket, prefix, err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if len(response.Contents) == 0 { - errMsg := fmt.Errorf("prefix %s is empty or does not exist", prefix) - log.Error(errMsg.Error()) - return c.JSON(http.StatusBadRequest, errMsg.Error()) - } - size, _, err := 
bh.GetSize(response) - if err != nil { - errMsg := fmt.Errorf("error retrieving size of prefix: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - limit := uint64(1024 * 1024 * 1024 * bh.Config.DefaultScriptDownloadSizeLimit) - if size > limit { - errMsg := fmt.Errorf("request entity is larger than %v GB, current prefix size is: %v GB", bh.Config.DefaultScriptDownloadSizeLimit, float64(size)/(1024*1024*1024)) - log.Error(errMsg.Error()) - return c.JSON(http.StatusRequestEntityTooLarge, errMsg.Error()) - } - - //expiration period from the env - + var totalSize uint64 var scriptBuilder strings.Builder createdDirs := make(map[string]bool) - // Add download instructions at the beginning of the script + basePrefix := filepath.Base(strings.TrimSuffix(prefix, "/")) scriptBuilder.WriteString("REM Download Instructions\n") scriptBuilder.WriteString("REM To download the selected directory or file, please follow these steps:\n\n") scriptBuilder.WriteString("REM 1. Locate the Downloaded File: Find the file you just downloaded. It should have a .txt file extension.\n") @@ -302,41 +280,49 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { scriptBuilder.WriteString("REM 3. Rename the File: Right-click on the file, select \"Rename,\" and change the file extension from \".txt\" to \".bat.\" For example, if the file is named \"script.txt,\" rename it to \"script.bat.\"\n") scriptBuilder.WriteString("REM 4. Initiate the Download: Double-click the renamed \".bat\" file to initiate the download process. Windows might display a warning message to protect your PC.\n") scriptBuilder.WriteString("REM 5. 
Windows Defender SmartScreen (Optional): If you see a message like \"Windows Defender SmartScreen prevented an unrecognized app from starting,\" click \"More info\" and then click \"Run anyway\" to proceed with the download.\n\n") - //iterate over every object and check if it has any sub-prefixes to maintain a directory structure - - basePrefix := filepath.Base(strings.TrimSuffix(prefix, "/")) scriptBuilder.WriteString(fmt.Sprintf("mkdir \"%s\"\n", basePrefix)) - for _, item := range response.Contents { - // Remove the prefix up to the base, keeping the structure under the base prefix - relativePath := strings.TrimPrefix(*item.Key, filepath.Dir(prefix)+"/") - - // Calculate the directory path for the relative path - dirPath := filepath.Join(basePrefix, filepath.Dir(relativePath)) + // Define the processPage function + processPage := func(page *s3.ListObjectsV2Output) error { + for _, item := range page.Contents { + // Size checking + if item.Size != nil { + totalSize += uint64(*item.Size) + if totalSize > uint64(bh.Config.DefaultScriptDownloadSizeLimit*1024*1024*1024) { + return fmt.Errorf("size limit of %d GB exceeded", bh.Config.DefaultScriptDownloadSizeLimit) + } - // Create directory if it does not exist and is not the root - if _, exists := createdDirs[dirPath]; !exists && dirPath != basePrefix { - scriptBuilder.WriteString(fmt.Sprintf("mkdir \"%s\"\n", dirPath)) - createdDirs[dirPath] = true - } + } - // Create the full path for the object including the base prefix - fullPath := filepath.Join(basePrefix, relativePath) + // Script generation logic (replicating your directory creation and URL logic) + relativePath := strings.TrimPrefix(*item.Key, filepath.Dir(prefix)+"/") + dirPath := filepath.Join(basePrefix, filepath.Dir(relativePath)) + if _, exists := createdDirs[dirPath]; !exists && dirPath != basePrefix { + scriptBuilder.WriteString(fmt.Sprintf("mkdir \"%s\"\n", dirPath)) + createdDirs[dirPath] = true + } - presignedURL, err := 
s3Ctrl.GetDownloadPresignedURL(bucket, *item.Key, bh.Config.DefaultDownloadPresignedUrlExpiration) - if err != nil { - errMsg := fmt.Errorf("error generating presigned URL for %s: %s", *item.Key, err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - url, err := url.QueryUnescape(presignedURL) //to remove url encoding which causes errors when executed in terminal - if err != nil { - errMsg := fmt.Errorf("error Unescaping url encoding: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + fullPath := filepath.Join(basePrefix, relativePath) + presignedURL, err := s3Ctrl.GetDownloadPresignedURL(bucket, *item.Key, bh.Config.DefaultDownloadPresignedUrlExpiration) + if err != nil { + return fmt.Errorf("error generating presigned URL for object %s: %v", *item.Key, err) + } + url, err := url.QueryUnescape(presignedURL) + if err != nil { + return fmt.Errorf("error unescaping URL encoding: %v", err) + } + encodedURL := strings.ReplaceAll(url, " ", "%20") + scriptBuilder.WriteString(fmt.Sprintf("if exist \"%s\" (echo skipping existing file) else (curl -v -o \"%s\" \"%s\")\n", fullPath, fullPath, encodedURL)) } - encodedURL := strings.ReplaceAll(url, " ", "%20") - scriptBuilder.WriteString(fmt.Sprintf("if exist \"%s\" (echo skipping existing file) else (curl -v -o \"%s\" \"%s\")\n", fullPath, fullPath, encodedURL)) + return nil + } + + // Call GetList with the processPage function + err = s3Ctrl.GetListWithCallBack(bucket, prefix, false, processPage) + if err != nil { + errMsg := fmt.Errorf("error processing objects: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } txtBatFileName := fmt.Sprintf("%s_download_script.txt", strings.TrimSuffix(prefix, "/")) @@ -351,7 +337,7 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { ContentType: aws.String("binary/octet-stream"), }) if err != nil 
{ - errMsg := fmt.Errorf("error uploading %s to S3: %s", txtBatFileName, err) + errMsg := fmt.Errorf("error uploading %s to S3: %s", txtBatFileName, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusInternalServerError, errMsg.Error()) } @@ -363,6 +349,6 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - log.Infof("Successfully generated download script for prefix %s in bucket %s", prefix, bucket) + log.Infof("successfully generated download script for prefix %s in bucket %s", prefix, bucket) return c.JSON(http.StatusOK, href) } diff --git a/blobstore/upload.go b/blobstore/upload.go index ef83ecd..e26ea70 100644 --- a/blobstore/upload.go +++ b/blobstore/upload.go @@ -2,7 +2,6 @@ package blobstore import ( "bytes" - "errors" "fmt" "io" "net/http" @@ -10,8 +9,10 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" + log "github.com/sirupsen/logrus" ) @@ -24,7 +25,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC resp, err := s3Ctrl.S3Svc.CreateMultipartUpload(params) if err != nil { - return fmt.Errorf("uploadS3Obj: error initializing multipart upload. %s", err.Error()) + return fmt.Errorf("error initializing multipart upload. %s", err.Error()) } // Create the variables that will track upload progress @@ -41,7 +42,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC // This would be a true error while reading if err != nil && err != io.EOF { - return fmt.Errorf("uploadS3Obj: error copying POST body to S3. %s", err.Error()) + return fmt.Errorf("error copying POST body to S3. 
%s", err.Error()) } // Add the buffer data to the buffer @@ -59,7 +60,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC result, err := s3Ctrl.S3Svc.UploadPart(params) if err != nil { - return fmt.Errorf("uploadS3Obj: error streaming POST body to S3. %s, %+v", err.Error(), result) + return fmt.Errorf("error streaming POST body to S3. %s, %+v", err.Error(), result) } totalBytes += int64(buffer.Len()) @@ -88,7 +89,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC result, err := s3Ctrl.S3Svc.UploadPart(params2) if err != nil { - return fmt.Errorf("uploadS3Obj: error streaming POST body to S3. %s, %+v", err.Error(), result) + return fmt.Errorf("error streaming POST body to S3. %s, %+v", err.Error(), result) } totalBytes += int64(buffer.Len()) @@ -106,7 +107,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC } _, err = s3Ctrl.S3Svc.CompleteMultipartUpload(completeParams) if err != nil { - return fmt.Errorf("uploadS3Obj: error completing multipart upload. %s", err.Error()) + return fmt.Errorf("error completing multipart upload. 
%s", err.Error()) } return nil @@ -116,15 +117,15 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { // Add overwrite check and parameter key := c.QueryParam("key") if key == "" { - err := errors.New("parameter 'key' is required") - log.Error("HandleMultipartUpload: " + err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("parameter 'key' is required") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -144,23 +145,24 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { var err error override, err = strconv.ParseBool(c.QueryParam("override")) if err != nil { - log.Errorf("HandleMultipartUpload: Error parsing 'override' parameter: %s", err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("error parsing 'override' parameter: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } } else { - err := errors.New("request must include a `override`, options are `true` or `false`") - log.Errorf("HandleMultipartUpload: %s", err.Error()) - return c.JSON(http.StatusUnprocessableEntity, err.Error()) + errMsg := fmt.Errorf("request must include a `override`, options are `true` or `false`") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } // Check if the request body is empty buf := make([]byte, 1) _, err = c.Request().Body.Read(buf) if err == io.EOF { - err := errors.New("no file provided in the request body") - log.Error("HandleMultipartUpload: " + err.Error()) - return 
c.JSON(http.StatusBadRequest, err.Error()) // Return 400 Bad Request + errMsg := fmt.Errorf("no file provided in the request body") + log.Error(errMsg.Error()) + return c.JSON(http.StatusBadRequest, errMsg.Error()) // Return 400 Bad Request } // Reset the request body to its original state @@ -168,13 +170,14 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { keyExist, err := s3Ctrl.KeyExists(bucket, key) if err != nil { - log.Errorf("HandleMultipartUpload: Error checking if key exists: %s", err.Error()) - return c.JSON(http.StatusInternalServerError, err) + errMsg := fmt.Errorf("error checking if object exists: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } if keyExist && !override { - err := fmt.Errorf("object %s already exists and override is set to %t", key, override) - log.Errorf("HandleMultipartUpload: %s" + err.Error()) - return c.JSON(http.StatusConflict, err.Error()) + errMsg := fmt.Errorf("object %s already exists and override is set to %t", key, override) + log.Errorf(errMsg.Error()) + return c.JSON(http.StatusConflict, errMsg.Error()) } body := c.Request().Body @@ -182,11 +185,12 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { err = s3Ctrl.UploadS3Obj(bucket, key, body) if err != nil { - log.Errorf("HandleMultipartUpload: Error uploading S3 object: %s", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error uploading S3 object: %s", err.Error()) + log.Errorf(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } - log.Infof("HandleMultipartUpload: Successfully uploaded file with key: %s", key) + log.Infof("Successfully uploaded file with key: %s", key) return c.JSON(http.StatusOK, "Successfully uploaded file") } @@ -209,16 +213,45 @@ func (s3Ctrl *S3Controller) GetUploadPresignedURL(bucket string, key string, exp // function to retrieve presigned url for a multipart 
upload part. func (s3Ctrl *S3Controller) GetUploadPartPresignedURL(bucket string, key string, uploadID string, partNumber int64, expMin int) (string, error) { duration := time.Duration(expMin) * time.Minute - req, _ := s3Ctrl.S3Svc.UploadPartRequest(&s3.UploadPartInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - UploadId: aws.String(uploadID), - PartNumber: aws.Int64(partNumber), - }) + var urlStr string + var err error + if s3Ctrl.S3Mock { + // Create a temporary S3 client with the modified endpoint + //this is done so that the presigned url starts with localhost:9000 instead of + //minio:9000 which would cause an error due to cors origin policy + tempS3Svc, err := session.NewSession(&aws.Config{ + Endpoint: aws.String("http://localhost:9000"), + Region: s3Ctrl.S3Svc.Config.Region, + Credentials: s3Ctrl.S3Svc.Config.Credentials, + S3ForcePathStyle: aws.Bool(true), + }) + if err != nil { + return "", fmt.Errorf("error creating temporary s3 session: %s", err.Error()) + } - urlStr, err := req.Presign(duration) - if err != nil { - return "", err + // Generate the request using the temporary client + req, _ := s3.New(tempS3Svc).UploadPartRequest(&s3.UploadPartInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + UploadId: aws.String(uploadID), + PartNumber: aws.Int64(partNumber), + }) + urlStr, err = req.Presign(duration) + if err != nil { + return "", err + } + } else { + // Generate the request using the original client + req, _ := s3Ctrl.S3Svc.UploadPartRequest(&s3.UploadPartInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + UploadId: aws.String(uploadID), + PartNumber: aws.Int64(partNumber), + }) + urlStr, err = req.Presign(duration) + if err != nil { + return "", err + } } return urlStr, nil @@ -236,7 +269,7 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { //get controller for bucket s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, 
err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -259,8 +292,9 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { } presignedURL, err := s3Ctrl.GetUploadPartPresignedURL(bucket, key, uploadID, int64(partNumber), bh.Config.DefaultUploadPresignedUrlExpiration) if err != nil { - log.Errorf("error generating presigned part URL: %s", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + errMsg := fmt.Errorf("error generating presigned part URL: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) } log.Infof("successfully generated presigned part URL for key: %s", key) return c.JSON(http.StatusOK, presignedURL) @@ -305,7 +339,7 @@ func (bh *BlobHandler) HandleGetMultipartUploadID(c echo.Context) error { //get controller for bucket s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -354,7 +388,7 @@ func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } @@ -422,7 +456,7 @@ func (bh *BlobHandler) HandleAbortMultipartUpload(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("bucket %s is not available, %s", bucket, err.Error()) + errMsg := 
fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index 00a59d3..6291595 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -296,7 +296,7 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": "missing_huc8s.xlsx" } ] }, @@ -776,6 +776,45 @@ }, "response": [] }, + { + "name": "10.2/prefix/download_script", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{bearer_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "url": { + "raw": "{{s3_api_root_url}}/prefix/download/script?prefix={{e2ePathToObj}}&bucket={{bucket}}", + "host": [ + "{{s3_api_root_url}}" + ], + "path": [ + "prefix", + "download", + "script" + ], + "query": [ + { + "key": "prefix", + "value": "{{e2ePathToObj}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + } + ] + } + }, + "response": [] + }, { "name": "11/object/upload 2", "event": [ @@ -817,7 +856,7 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": "missing_huc8s.xlsx" } ] }, @@ -1040,7 +1079,7 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": "missing_huc8s.xlsx" } ] }, @@ -1112,7 +1151,7 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": "missing_huc8s.xlsx" } ] }, @@ -1195,6 +1234,362 @@ } }, "response": [] + }, + { + "name": "18/object/presigned_upload", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{bearer_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "url": { + "raw": "{{s3_api_root_url}}/object/presigned_upload?key={{e2eObjName}}&bucket={{bucket}}", + "host": [ + "{{s3_api_root_url}}" + ], + "path": [ 
+ "object", + "presigned_upload" + ], + "query": [ + { + "key": "key", + "value": "{{e2eObjName}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + } + ] + } + }, + "response": [] + }, + { + "name": "19/object/multipart_upload_id", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "var jsonData = pm.response.json();\r", + "pm.environment.set(\"uploadId\", jsonData);\r", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{bearer_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "url": { + "raw": "{{s3_api_root_url}}/object/multipart_upload_id?key={{e2eObjName}}&bucket={{bucket}}", + "host": [ + "{{s3_api_root_url}}" + ], + "path": [ + "object", + "multipart_upload_id" + ], + "query": [ + { + "key": "key", + "value": "{{e2eObjName}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + } + ] + } + }, + "response": [] + }, + { + "name": "19/object/presigned_upload_multipart", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "var jsonData = pm.response.json()//.replace('minio', 'localhost');\r", + "pm.environment.set(\"presignedUploadUrl\", jsonData);\r", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{bearer_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "url": { + "raw": "{{s3_api_root_url}}/object/presigned_upload?key={{e2eObjName}}&bucket={{bucket}}&upload_id={{uploadId}}&part_number=1", + "host": [ + "{{s3_api_root_url}}" + ], + "path": [ + "object", + "presigned_upload" + ], + "query": [ + { + "key": "key", + "value": "{{e2eObjName}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "upload_id", + "value": "{{uploadId}}" + }, + { + "key": "part_number", + "value": "1" + } + ] + } + }, + "response": [] + }, + { + "name": "presigned_url_test", + "event": [ 
+ { + "listen": "prerequest", + "script": { + "exec": [ + "" + ], + "type": "text/javascript" + } + }, + { + "listen": "test", + "script": { + "exec": [ + "// Extract ETag from the response header\r", + "var etag = pm.response.headers.get(\"ETag\");\r", + "\r", + "\r", + "// Remove extra quotation marks if present\r", + "etag = etag.replace(/\"/g, '');\r", + "\r", + "\r", + "// Set the ETag as an environment variable\r", + "pm.environment.set(\"etag\", etag);\r", + "" + ], + "type": "text/javascript" + } + } + ], + "protocolProfileBehavior": { + "disabledSystemHeaders": {} + }, + "request": { + "method": "PUT", + "header": [], + "body": { + "mode": "formdata", + "formdata": [ + { + "key": "", + "type": "file", + "src": "missing_huc8s.xlsx" + } + ] + }, + "url": { + "raw": "{{presignedUploadUrl}}", + "host": [ + "{{presignedUploadUrl}}" + ] + } + }, + "response": [] + }, + { + "name": "20/object/complete_multipart_upload", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{bearer_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\r\n \"uploadId\": \"{{uploadId}}\",\r\n \"parts\": [{\r\n \"partNumber\":1,\r\n \"eTag\":\"{{etag}}\"\r\n }]\r\n}\r\n", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{s3_api_root_url}}/object/complete_multipart_upload?key={{e2eObjName}}&bucket={{bucket}}", + "host": [ + "{{s3_api_root_url}}" + ], + "path": [ + "object", + "complete_multipart_upload" + ], + "query": [ + { + "key": "key", + "value": "{{e2eObjName}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + } + ] + } + }, + "response": [] + }, + { + "name": "20/object/multipart_upload_id", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "var jsonData = pm.response.json();\r", + "pm.environment.set(\"uploadId\", jsonData);\r", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "auth": { + "type": 
"bearer", + "bearer": [ + { + "key": "token", + "value": "{{bearer_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "url": { + "raw": "{{s3_api_root_url}}/object/multipart_upload_id?key={{e2eObjName}}&bucket={{bucket}}", + "host": [ + "{{s3_api_root_url}}" + ], + "path": [ + "object", + "multipart_upload_id" + ], + "query": [ + { + "key": "key", + "value": "{{e2eObjName}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + } + ] + } + }, + "response": [] + }, + { + "name": "21/object/abort_multipart_upload", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{bearer_token}}", + "type": "string" + } + ] + }, + "method": "POST", + "header": [], + "url": { + "raw": "{{s3_api_root_url}}/object/abort_multipart_upload?key={{e2eObjName}}&bucket={{bucket}}&upload_id={{uploadId}}", + "host": [ + "{{s3_api_root_url}}" + ], + "path": [ + "object", + "abort_multipart_upload" + ], + "query": [ + { + "key": "key", + "value": "{{e2eObjName}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "upload_id", + "value": "{{uploadId}}" + } + ] + } + }, + "response": [] } ], "event": [ @@ -1267,7 +1662,7 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": "missing_huc8s.xlsx" } ] }, @@ -1339,7 +1734,7 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": "missing_huc8s.xlsx" } ] }, @@ -1438,7 +1833,7 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": "missing_huc8s.xlsx" } ] }, @@ -1514,7 +1909,7 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": "missing_huc8s.xlsx" } ] }, @@ -1587,7 +1982,7 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": "missing_huc8s.xlsx" } ] }, @@ -2183,6 +2578,57 @@ }, "response": [] }, + { + "name": "10.2/prefix/download/script", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "auth": { + "type": 
"bearer", + "bearer": [ + { + "key": "token", + "value": "{{bearer_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [], + "url": { + "raw": "{{s3_api_root_url}}/prefix/download/script?bucket={{bucket}}", + "host": [ + "{{s3_api_root_url}}" + ], + "path": [ + "prefix", + "download", + "script" + ], + "query": [ + { + "key": "prefix", + "value": "{{e2ePathToObj}}", + "disabled": true + }, + { + "key": "bucket", + "value": "{{bucket}}" + } + ] + } + }, + "response": [] + }, { "name": "12/prefix/move", "event": [ @@ -3094,7 +3540,10 @@ { "key": "", "type": "file", - "src": "staff_table.csv" + "src": [ + "missing_huc8s.xlsx", + "missing_huc8s.xlsx" + ] } ] }, @@ -3403,5 +3852,11 @@ } ] } + ], + "variable": [ + { + "key": "presignedUploadUrl", + "value": "\"\"" + } ] } \ No newline at end of file diff --git a/e2e-test/e2eEnv.json b/e2e-test/e2eEnv.json index d1e1dc9..f5b8e48 100644 --- a/e2e-test/e2eEnv.json +++ b/e2e-test/e2eEnv.json @@ -52,7 +52,7 @@ }, { "key": "e2eObjName", - "value": "staff_table3.txt", + "value": "missing_huc8s.xlsx", "type": "default", "enabled": true }, @@ -73,6 +73,24 @@ "value": "test-bucket", "type": "default", "enabled": true + }, + { + "key": "uploadId", + "value": "", + "type": "any", + "enabled": true + }, + { + "key": "presignedUploadUrl", + "value": "", + "type": "any", + "enabled": true + }, + { + "key": "etag", + "value": "", + "type": "any", + "enabled": true } ], "_postman_variable_scope": "environment", diff --git a/e2e-test/missing_huc8s.xlsx b/e2e-test/missing_huc8s.xlsx new file mode 100644 index 0000000..d18cf6f Binary files /dev/null and b/e2e-test/missing_huc8s.xlsx differ diff --git a/e2e-test/staff_table.csv b/e2e-test/staff_table.csv deleted file mode 100644 index 6f9539d..0000000 --- a/e2e-test/staff_table.csv +++ /dev/null @@ -1 +0,0 @@ -TEST,TEST,TEST,TEST,TEST,TEST \ No newline at end of file diff --git a/env-checker/env_checker.go b/env-checker/env_checker.go index d903e52..e76af80 100644 --- 
a/env-checker/env_checker.go +++ b/env-checker/env_checker.go @@ -1,7 +1,6 @@ package envcheck import ( - "errors" "fmt" "os" ) @@ -20,7 +19,7 @@ func CheckEnvVariablesExist(envVars []string) error { if len(missingVars) > 0 { errMsg := fmt.Sprintf("The following environment variables are missing: %v", missingVars) - return errors.New(errMsg) + return fmt.Errorf("%s", errMsg) } return nil