diff --git a/.github/scripts/gcs/Makefile b/.github/scripts/gcs/Makefile index 7c96eae..1bd200d 100644 --- a/.github/scripts/gcs/Makefile +++ b/.github/scripts/gcs/Makefile @@ -87,6 +87,7 @@ test-int: export MULTIREGIONAL_BUCKET_NAME="$$(cat $(TMP_DIR)/multiregional.lock)" && \ export REGIONAL_BUCKET_NAME="$$(cat $(TMP_DIR)/regional.lock)" && \ export PUBLIC_BUCKET_NAME="$$(cat $(TMP_DIR)/public.lock)" && \ + export SKIP_LONG_TESTS="" && \ cd ../../.. && go run github.com/onsi/ginkgo/v2/ginkgo gcs/integration/ # Perform all non-long tests, including integration tests. diff --git a/.github/scripts/s3/run-integration-aws-iam.sh b/.github/scripts/s3/run-integration-aws-iam.sh index a6e2064..498acb9 100755 --- a/.github/scripts/s3/run-integration-aws-iam.sh +++ b/.github/scripts/s3/run-integration-aws-iam.sh @@ -33,10 +33,12 @@ trap "cat ${lambda_log}" EXIT # Go to the repository root (3 levels up from script directory) pushd "${repo_root}" > /dev/null - + export CGO_ENABLED=0 + export GOOS=linux + export GOARCH=amd64 echo -e "\n building artifact with $(go version)..." - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o out/s3cli ./s3 - CGO_ENABLED=0 ginkgo build s3/integration + go build -o out/s3cli + ginkgo build s3/integration zip -j payload.zip s3/integration/integration.test out/s3cli ${script_dir}/assets/lambda_function.py @@ -51,6 +53,7 @@ pushd "${repo_root}" > /dev/null --handler lambda_function.test_runner_handler \ --runtime python3.9 + echo "Create done, invoking Lambda function ${lambda_function_name}..." 
set +e tries=0 get_function_status_command="aws lambda get-function --region ${region_name} --function-name ${lambda_function_name}" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f97af8a..d2eca6c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,102 +17,22 @@ jobs: with: go-version-file: 'go.mod' - - name: Alioss CLI Build for Linux + - name: Storage-CLI Build for Linux env: GOOS: linux GOARCH: amd64 CGO_ENABLED: 0 run: | - echo "Building Alioss CLI for Linux" - go build -o "alioss-cli-linux-amd64" ./alioss - sha1sum "alioss-cli-linux-amd64" + echo "Building Storage CLI for Linux" + go build -o "storage-cli-linux-amd64" + sha1sum "storage-cli-linux-amd64" - - name: Alioss CLI Build for Windows + - name: Storage-CLI Build for Windows env: GOOS: windows GOARCH: amd64 CGO_ENABLED: 0 run: | - echo "Building Alioss CLI for Windows" - go build -o "alioss-cli-windows-amd64.exe" ./alioss - sha1sum "alioss-cli-windows-amd64.exe" - - - name: Azurebs CLI Build for Linux - env: - GOOS: linux - GOARCH: amd64 - CGO_ENABLED: 0 - run: | - echo "Building Azurebs CLI for Linux" - go build -o "azurebs-cli-linux-amd64" ./azurebs - sha1sum "azurebs-cli-linux-amd64" - - - name: Azurebs CLI Build for Windows - env: - GOOS: windows - GOARCH: amd64 - CGO_ENABLED: 0 - run: | - echo "Building Azurebs CLI for Windows" - go build -o "azurebs-cli-windows-amd64.exe" ./azurebs - sha1sum "azurebs-cli-windows-amd64.exe" - - - name: Dav CLI Build for Linux - env: - GOOS: linux - GOARCH: amd64 - CGO_ENABLED: 0 - run: | - echo "Building Dav CLI for Linux" - go build -o "dav-cli-linux-amd64" ./dav/main - sha1sum "dav-cli-linux-amd64" - - - name: Dav CLI Build for Windows - env: - GOOS: windows - GOARCH: amd64 - CGO_ENABLED: 0 - run: | - echo "Building Dav CLI for Windows" - go build -o "dav-cli-windows-amd64.exe" ./dav/main - sha1sum "dav-cli-windows-amd64.exe" - - - name: GCS CLI Build for Linux - env: - GOOS: linux - GOARCH: amd64 - 
CGO_ENABLED: 0 - run: | - echo "Building Gcs CLI for Linux" - go build -o "gcs-cli-linux-amd64" ./gcs - sha1sum "gcs-cli-linux-amd64" - - - name: GCS CLI Build for Windows - env: - GOOS: windows - GOARCH: amd64 - CGO_ENABLED: 0 - run: | - echo "Building Gcs CLI for Windows" - go build -o "gcs-cli-windows-amd64.exe" ./gcs - sha1sum "gcs-cli-windows-amd64.exe" - - - name: S3 CLI Build for Linux - env: - GOOS: linux - GOARCH: amd64 - CGO_ENABLED: 0 - run: | - echo "Building S3 CLI for Linux" - go build -o "s3-cli-linux-amd64" ./s3 - sha1sum "s3-cli-linux-amd64" - - - name: S3 CLI Build for Windows - env: - GOOS: windows - GOARCH: amd64 - CGO_ENABLED: 0 - run: | - echo "Building S3 CLI for Windows" - go build -o "s3-cli-windows-amd64.exe" ./s3 - sha1sum "s3-cli-windows-amd64.exe" \ No newline at end of file + echo "Building Storage CLI for Windows" + go build -o "storage-cli-windows-amd64.exe" + sha1sum "storage-cli-windows-amd64.exe" \ No newline at end of file diff --git a/.gitignore b/.gitignore index 8e1cc9e..bd55077 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,5 @@ # IDE/editor .vscode/ .idea/ + +.DS_Store diff --git a/README.md b/README.md index 87e6f9e..530ce19 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,17 @@ # Storage CLI -This repository consolidates five independent blob-storage CLIs, one per provider, into a single codebase. Each provider has its own dedicated directory (azurebs/, s3/, gcs/, alioss/, dav/), containing an independent main package and implementation. The tools are intentionally maintained as separate binaries, preserving each provider’s native SDK, command-line flags, and operational semantics. Each CLI exposes similar high-level operations (e.g., put, get, delete). +A unified command-line tool for interacting with multiple cloud storage providers through a single binary. 
The CLI supports five blob-storage providers (Azure Blob Storage, AWS S3, Google Cloud Storage, Alibaba Cloud OSS, and WebDAV), each with its own client implementation while sharing a common command interface. + +**Note:** This CLI works with existing storage resources (buckets, containers, etc.) that are already created and configured in your cloud provider. The storage bucket/container name and credentials must be specified in the provider-specific configuration file. Key points -- Each provider builds independently. +- Single binary with provider selection via `-s` flag. -- Client setup, config, and options are contained within the provider’s folder. +- Each provider has its own directory (azurebs/, s3/, gcs/, alioss/, dav/) containing client implementations and configurations. -- All tools support the same core commands (such as put, get, and delete) for a familiar workflow, while each provider defines its own flags, parameters, and execution flow that align with its native SDK and terminology. +- All providers support the same core commands (put, get, delete, exists, list, copy, etc.). -- Central issue tracking, shared CI, and aligned release process without merging implementations. +- Provider-specific configurations are passed via JSON config files. ## Providers @@ -21,12 +23,86 @@ Key points ## Build -Use following command to build it locally +Build the unified storage CLI binary: + +```shell +go build -o storage-cli +``` +Or with version information: ```shell -go build -o / /main.go +go build -ldflags "-X main.version=1.0.0" -o storage-cli ``` -e.g. 
`go build -o alioss/alioss-cli alioss/main.go` + +## Usage + +The CLI uses a unified command structure across all providers: + +```shell +storage-cli -s -c [arguments] +``` + +**Flags:** +- `-s`: Storage provider type (azurebs|s3|gcs|alioss|dav) +- `-c`: Path to provider-specific configuration file +- `-v`: Show version + +**Common commands:** +- `put ` - Upload a local file to remote storage +- `get ` - Download a remote object to local file +- `delete ` - Delete a remote object +- `delete-recursive [prefix]` - Delete objects recursively. If prefix is omitted, deletes all objects +- `exists ` - Check if a remote object exists (exits with code 3 if not found) +- `list [prefix]` - List remote objects. If prefix is omitted, lists all objects +- `copy ` - Copy object within the same storage +- `sign ` - Generate signed URL (action: get|put, duration: e.g., 60s) +- `properties ` - Display properties/metadata of a remote object +- `ensure-storage-exists` - Ensure the storage container/bucket exists + +**Examples:** +```shell +# Upload file to S3 +storage-cli -s s3 -c s3-config.json put local-file.txt remote-object.txt + +# List GCS objects with prefix +storage-cli -s gcs -c gcs-config.json list my-prefix + +# Check if Azure blob exists +storage-cli -s azurebs -c azure-config.json exists my-blob.txt + +# Get properties of an object +storage-cli -s azurebs -c azure-config.json properties my-blob.txt + +# Sign object for 'get' in alioss for 60 seconds +storage-cli -s alioss -c alioss-config.json sign object.txt get 60s +``` + +## Contributing + +Follow these steps to make a contribution to the project: + +- Fork this repository +- Create a feature branch based upon the `main` branch (*pull requests must be made against this branch*) + ``` bash + git checkout -b feature-name origin/main + ``` +- Run tests to check your development environment setup + ``` bash + ginkgo --race --skip-package=integration --cover -v -r ./... 
+ ``` +- Make your changes (*be sure to add/update tests*) +- Run tests to check your changes + ``` bash + ginkgo --race --skip-package=integration --cover -v -r ./... + ``` +- If you added or modified integration tests, to run them locally, follow the instructions in the provider-specific README (see [Providers](#providers) section) +- Push changes to your fork + ``` bash + git add . + git commit -m "Commit message" + git push origin feature-name + ``` +- Create a GitHub pull request, selecting `main` as the target branch ## Notes diff --git a/alioss/README.md b/alioss/README.md index a8f401a..595b68b 100644 --- a/alioss/README.md +++ b/alioss/README.md @@ -1,11 +1,14 @@ -# Ali Storage CLI +# Alibaba Cloud OSS Client -The Ali Storage CLI is for uploading, fetching and deleting content to and from an Ali OSS. -It is highly inspired by the [storage-cli/s3](https://github.com/cloudfoundry/storage-cli/blob/6058f516e9b81471b64a50b01e228158a05731f0/s3) +Alibaba Cloud OSS (Object Storage Service) client implementation for the unified storage-cli tool. This module provides Alibaba Cloud OSS operations through the main storage-cli binary. -## Usage +**Note:** This is not a standalone CLI. Use the main `storage-cli` binary with `-s alioss` flag to access AliOSS functionality. -Given a JSON config file (`config.json`)... +For general usage and build instructions, see the [main README](../README.md). + +## AliOSS-Specific Configuration + +The AliOSS client requires a JSON configuration file with the following structure: ``` json { @@ -16,46 +19,40 @@ Given a JSON config file (`config.json`)... } ``` +**Usage examples:** ``` bash -# Command: "put" -# Upload a blob to the blobstore. -./alioss-cli -c config.json put - -# Command: "get" -# Fetch a blob from the blobstore. -# Destination file will be overwritten if exists. -./alioss-cli -c config.json get - -# Command: "delete" -# Remove a blob from the blobstore. 
-./alioss-cli -c config.json delete - -# Command: "exists" -# Checks if blob exists in the blobstore. -./alioss-cli -c config.json exists - -# Command: "sign" -# Create a self-signed url for a blob in the blobstore. -./alioss-cli -c config.json sign +# Upload a blob +storage-cli -s alioss -c alioss-config.json put local-file.txt remote-blob + +# Fetch a blob (destination file will be overwritten if exists) +storage-cli -s alioss -c alioss-config.json get remote-blob local-file.txt + +# Delete a blob +storage-cli -s alioss -c alioss-config.json delete remote-blob + +# Check if blob exists +storage-cli -s alioss -c alioss-config.json exists remote-blob + +# Generate a signed URL (e.g., GET for 3600 seconds) +storage-cli -s alioss -c alioss-config.json sign remote-blob get 3600s ``` -### Using signed urls with curl +### Using Signed URLs with curl ``` bash # Uploading a blob: -curl -X PUT -T path/to/file +curl -X PUT -T path/to/file # Downloading a blob: -curl -X GET +curl -X GET ``` -## Running Tests + +## Testing ### Unit Tests -**Note:** Run the following commands from the repository root directory. +Run from the repository root directory: ```bash -go install github.com/onsi/ginkgo/v2/ginkgo - -ginkgo --skip-package=integration --randomize-all --cover -v -r ./alioss/... +ginkgo --skip-package=integration --cover -v -r ./alioss/... 
``` ### Integration Tests diff --git a/alioss/client/client.go b/alioss/client/client.go index f48d2ce..712a42a 100644 --- a/alioss/client/client.go +++ b/alioss/client/client.go @@ -3,11 +3,13 @@ package client import ( "crypto/md5" "encoding/base64" + "errors" "fmt" "io" "log" "os" "strings" + "time" ) type AliBlobstore struct { @@ -33,8 +35,8 @@ func (client *AliBlobstore) Put(sourceFilePath string, destinationObject string) return nil } -func (client *AliBlobstore) Get(sourceObject string, destinationFilePath string) error { - return client.storageClient.Download(sourceObject, destinationFilePath) +func (client *AliBlobstore) Get(sourceObject string, dest string) error { + return client.storageClient.Download(sourceObject, dest) } func (client *AliBlobstore) Delete(object string) error { @@ -45,8 +47,9 @@ func (client *AliBlobstore) Exists(object string) (bool, error) { return client.storageClient.Exists(object) } -func (client *AliBlobstore) Sign(object string, action string, expiredInSec int64) (string, error) { +func (client *AliBlobstore) Sign(object string, action string, expiration time.Duration) (string, error) { action = strings.ToUpper(action) + expiredInSec := int64(expiration.Seconds()) switch action { case "PUT": return client.storageClient.SignedUrlPut(object, expiredInSec) @@ -75,3 +78,23 @@ func (client *AliBlobstore) getMD5(filePath string) (string, error) { return md5, nil } + +func (client *AliBlobstore) List(prefix string) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (client *AliBlobstore) Copy(srcBlob string, dstBlob string) error { + return errors.New("not implemented") +} + +func (client *AliBlobstore) Properties(dest string) error { + return errors.New("not implemented") +} + +func (client *AliBlobstore) EnsureStorageExists() error { + return errors.New("not implemented") +} + +func (client *AliBlobstore) DeleteRecursive(prefix string) error { + return errors.New("not implemented") +} diff --git 
a/alioss/client/client_suite_test.go b/alioss/client/client_suite_test.go index 79e004c..4e8cdef 100644 --- a/alioss/client/client_suite_test.go +++ b/alioss/client/client_suite_test.go @@ -9,5 +9,5 @@ import ( func TestClient(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Client Suite") + RunSpecs(t, "Alioss Client Suite") } diff --git a/alioss/client/client_test.go b/alioss/client/client_test.go index f4e94e1..f43b298 100644 --- a/alioss/client/client_test.go +++ b/alioss/client/client_test.go @@ -3,6 +3,7 @@ package client_test import ( "errors" "os" + "time" "github.com/cloudfoundry/storage-cli/alioss/client" "github.com/cloudfoundry/storage-cli/alioss/client/clientfakes" @@ -110,19 +111,25 @@ var _ = Describe("Client", func() { }) Context("signed url", func() { + var expiry time.Duration + + BeforeEach(func() { + expiry = 100 * time.Second + }) + It("returns a signed url for action 'get'", func() { storageClient := clientfakes.FakeStorageClient{} storageClient.SignedUrlGetReturns("https://the-signed-url", nil) aliBlobstore, err := client.New(&storageClient) Expect(err).NotTo(HaveOccurred()) - url, err := aliBlobstore.Sign("blob", "get", 100) + url, err := aliBlobstore.Sign("blob", "get", expiry) Expect(url == "https://the-signed-url").To(BeTrue()) Expect(err).ToNot(HaveOccurred()) object, expiration := storageClient.SignedUrlGetArgsForCall(0) Expect(object).To(Equal("blob")) - Expect(int(expiration)).To(Equal(100)) + Expect(int(expiration)).To(Equal(int(expiry.Seconds()))) }) It("returns a signed url for action 'put'", func() { @@ -131,13 +138,13 @@ var _ = Describe("Client", func() { aliBlobstore, err := client.New(&storageClient) Expect(err).NotTo(HaveOccurred()) - url, err := aliBlobstore.Sign("blob", "put", 100) + url, err := aliBlobstore.Sign("blob", "put", expiry) Expect(url == "https://the-signed-url").To(BeTrue()) Expect(err).ToNot(HaveOccurred()) object, expiration := storageClient.SignedUrlPutArgsForCall(0) Expect(object).To(Equal("blob")) 
- Expect(int(expiration)).To(Equal(100)) + Expect(int(expiration)).To(Equal(int(expiry.Seconds()))) }) It("fails on unknown action", func() { @@ -146,7 +153,7 @@ var _ = Describe("Client", func() { aliBlobstore, err := client.New(&storageClient) Expect(err).NotTo(HaveOccurred()) - url, err := aliBlobstore.Sign("blob", "unknown", 100) + url, err := aliBlobstore.Sign("blob", "unknown", expiry) Expect(url).To(Equal("")) Expect(err).To(HaveOccurred()) diff --git a/alioss/config/config_suite_test.go b/alioss/config/config_suite_test.go index c6e29ba..673b7cc 100644 --- a/alioss/config/config_suite_test.go +++ b/alioss/config/config_suite_test.go @@ -9,5 +9,5 @@ import ( func TestConfig(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Config Suite") + RunSpecs(t, "Alioss Config Suite") } diff --git a/alioss/integration/general_ali_test.go b/alioss/integration/general_ali_test.go index 7a25640..cb64833 100644 --- a/alioss/integration/general_ali_test.go +++ b/alioss/integration/general_ali_test.go @@ -17,6 +17,7 @@ var _ = Describe("General testing for all Ali regions", func() { var blobName string var configPath string var contentFile string + var storageType = "alioss" BeforeEach(func() { blobName = integration.GenerateRandomString() @@ -32,16 +33,16 @@ var _ = Describe("General testing for all Ali regions", func() { Describe("Invoking `put`", func() { It("uploads a file", func() { defer func() { - cliSession, err := integration.RunCli(cliPath, configPath, "delete", blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) }() - cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = integration.RunCli(cliPath, 
configPath, "exists", blobName) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -50,7 +51,7 @@ var _ = Describe("General testing for all Ali regions", func() { It("overwrites an existing file", func() { defer func() { - cliSession, err := integration.RunCli(cliPath, configPath, "delete", blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) }() @@ -60,11 +61,11 @@ var _ = Describe("General testing for all Ali regions", func() { defer func() { _ = os.Remove(tmpLocalFile.Name()) }() //nolint:errcheck contentFile = integration.MakeContentFile("initial content") - cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = integration.RunCli(cliPath, configPath, "get", blobName, tmpLocalFile.Name()) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "get", blobName, tmpLocalFile.Name()) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -72,11 +73,11 @@ var _ = Describe("General testing for all Ali regions", func() { Expect(string(gottenBytes)).To(Equal("initial content")) contentFile = integration.MakeContentFile("updated content") - cliSession, err = integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = integration.RunCli(cliPath, configPath, "get", blobName, tmpLocalFile.Name()) + cliSession, err = integration.RunCli(cliPath, 
configPath, storageType, "get", blobName, tmpLocalFile.Name()) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -94,7 +95,7 @@ var _ = Describe("General testing for all Ali regions", func() { configPath = integration.MakeConfigFile(cfg) - cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(1)) @@ -108,18 +109,18 @@ var _ = Describe("General testing for all Ali regions", func() { outputFilePath := "/tmp/" + integration.GenerateRandomString() defer func() { - cliSession, err := integration.RunCli(cliPath, configPath, "delete", blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) _ = os.Remove(outputFilePath) //nolint:errcheck }() - cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = integration.RunCli(cliPath, configPath, "get", blobName, outputFilePath) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "get", blobName, outputFilePath) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -131,20 +132,20 @@ var _ = Describe("General testing for all Ali regions", func() { Describe("Invoking `delete`", func() { It("deletes a file", func() { defer func() { - cliSession, err := integration.RunCli(cliPath, configPath, "delete", blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) }() - cliSession, 
err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = integration.RunCli(cliPath, configPath, "delete", blobName) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = integration.RunCli(cliPath, configPath, "exists", blobName) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(3)) }) @@ -153,22 +154,22 @@ var _ = Describe("General testing for all Ali regions", func() { Describe("Invoking `exists`", func() { It("returns 0 for an existing blob", func() { defer func() { - cliSession, err := integration.RunCli(cliPath, configPath, "delete", blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) }() - cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = integration.RunCli(cliPath, configPath, "exists", blobName) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(0)) }) It("returns 3 for a not existing blob", func() { - cliSession, err := integration.RunCli(cliPath, configPath, "exists", blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) 
Expect(cliSession.ExitCode()).To(Equal(3)) }) @@ -176,14 +177,14 @@ var _ = Describe("General testing for all Ali regions", func() { Describe("Invoking `sign`", func() { It("returns 0 for an existing blob", func() { - cliSession, err := integration.RunCli(cliPath, configPath, "sign", "some-blob", "get", "60s") + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "sign", "some-blob", "get", "60s") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) getUrl := bytes.NewBuffer(cliSession.Out.Contents()).String() Expect(getUrl).To(MatchRegexp("http://" + bucketName + "." + endpoint + "/some-blob")) - cliSession, err = integration.RunCli(cliPath, configPath, "sign", "some-blob", "put", "60s") + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "sign", "some-blob", "put", "60s") Expect(err).ToNot(HaveOccurred()) putUrl := bytes.NewBuffer(cliSession.Out.Contents()).String() @@ -191,7 +192,7 @@ var _ = Describe("General testing for all Ali regions", func() { }) It("returns 3 for a not existing blob", func() { - cliSession, err := integration.RunCli(cliPath, configPath, "exists", blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(3)) }) @@ -202,7 +203,7 @@ var _ = Describe("General testing for all Ali regions", func() { configPath := integration.MakeConfigFile(&defaultConfig) defer func() { _ = os.Remove(configPath) }() //nolint:errcheck - cliSession, err := integration.RunCli(cliPath, configPath, "-v") + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "-v") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(0)) diff --git a/alioss/integration/integration_suite_test.go b/alioss/integration/integration_suite_test.go index a8e4fa2..0d007e4 100644 --- a/alioss/integration/integration_suite_test.go +++ 
b/alioss/integration/integration_suite_test.go @@ -26,7 +26,7 @@ var defaultConfig config.AliStorageConfig var _ = BeforeSuite(func() { if len(cliPath) == 0 { var err error - cliPath, err = gexec.Build("github.com/cloudfoundry/storage-cli/alioss") + cliPath, err = gexec.Build("github.com/cloudfoundry/storage-cli") Expect(err).ShouldNot(HaveOccurred()) } diff --git a/alioss/integration/utils.go b/alioss/integration/utils.go index c689f85..c32cb6f 100644 --- a/alioss/integration/utils.go +++ b/alioss/integration/utils.go @@ -51,10 +51,12 @@ func MakeContentFile(content string) string { return tmpFile.Name() } -func RunCli(cliPath string, configPath string, subcommand string, args ...string) (*gexec.Session, error) { +func RunCli(cliPath string, configPath string, storageType string, subcommand string, args ...string) (*gexec.Session, error) { cmdArgs := []string{ "-c", configPath, + "-s", + storageType, subcommand, } cmdArgs = append(cmdArgs, args...) diff --git a/alioss/main.go b/alioss/main.go deleted file mode 100644 index 348de6d..0000000 --- a/alioss/main.go +++ /dev/null @@ -1,135 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "time" - - "github.com/cloudfoundry/storage-cli/alioss/client" - "github.com/cloudfoundry/storage-cli/alioss/config" -) - -var version string - -func main() { - - configPath := flag.String("c", "", "configuration path") - showVer := flag.Bool("v", false, "version") - flag.Parse() - - if *showVer { - fmt.Printf("version %s\n", version) - os.Exit(0) - } - - configFile, err := os.Open(*configPath) - if err != nil { - log.Fatalln(err) - } - - aliConfig, err := config.NewFromReader(configFile) - if err != nil { - log.Fatalln(err) - } - - storageClient, err := client.NewStorageClient(aliConfig) - if err != nil { - log.Fatalln(err) - } - - blobstoreClient, err := client.New(storageClient) - if err != nil { - log.Fatalln(err) - } - - nonFlagArgs := flag.Args() - if len(nonFlagArgs) < 2 { - log.Fatalf("Expected at least two 
arguments got %d\n", len(nonFlagArgs)) - } - - cmd := nonFlagArgs[0] - - switch cmd { - case "put": - if len(nonFlagArgs) != 3 { - log.Fatalf("Put method expected 3 arguments got %d\n", len(nonFlagArgs)) - } - sourceFilePath, destination := nonFlagArgs[1], nonFlagArgs[2] - - _, err := os.Stat(sourceFilePath) - if err != nil { - log.Fatalln(err) - } - - err = blobstoreClient.Put(sourceFilePath, destination) - fatalLog(cmd, err) - - case "get": - if len(nonFlagArgs) != 3 { - log.Fatalf("Get method expected 3 arguments got %d\n", len(nonFlagArgs)) - } - source, destinationFilePath := nonFlagArgs[1], nonFlagArgs[2] - - err = blobstoreClient.Get(source, destinationFilePath) - fatalLog(cmd, err) - - case "delete": - if len(nonFlagArgs) != 2 { - log.Fatalf("Delete method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - - err = blobstoreClient.Delete(nonFlagArgs[1]) - fatalLog(cmd, err) - - case "exists": - if len(nonFlagArgs) != 2 { - log.Fatalf("Exists method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - - var exists bool - exists, err = blobstoreClient.Exists(nonFlagArgs[1]) - - // If the object exists the exit status is 0, otherwise it is 3 - // We are using `3` since `1` and `2` have special meanings - if err == nil && !exists { - os.Exit(3) - } - - case "sign": - if len(nonFlagArgs) != 4 { - log.Fatalf("Sign method expects 3 arguments got %d\n", len(nonFlagArgs)-1) - } - - object, action := nonFlagArgs[1], nonFlagArgs[2] - - if action != "get" && action != "put" { - log.Fatalf("Action not implemented: %s. Available actions are 'get' and 'put'", action) - } - - duration, err := time.ParseDuration(nonFlagArgs[3]) - if err != nil { - log.Fatalf("Expiration should be in the format of a duration i.e. 1h, 60m, 3600s. 
Got: %s", nonFlagArgs[3]) - } - - expiredInSec := int64(duration.Seconds()) - signedURL, err := blobstoreClient.Sign(object, action, expiredInSec) - - if err != nil { - log.Fatalf("Failed to sign request: %s", err) - } - - fmt.Println(signedURL) - os.Exit(0) - - default: - log.Fatalf("unknown command: '%s'\n", cmd) - } -} - -func fatalLog(cmd string, err error) { - if err != nil { - log.Fatalf("performing operation %s: %s\n", cmd, err) - } -} diff --git a/azurebs/README.md b/azurebs/README.md index 7977ba0..500662a 100644 --- a/azurebs/README.md +++ b/azurebs/README.md @@ -1,72 +1,65 @@ -# Azure Storage CLI +# Azure Blob Storage Client -The Azure Storage CLI is for uploading, fetching and deleting content to and from an Azure blobstore. -It is highly inspired by the [storage-cli/s3](https://github.com/cloudfoundry/storage-cli/blob/6058f516e9b81471b64a50b01e228158a05731f0/s3) +Azure Blob Storage client implementation for the unified storage-cli tool. This module provides Azure Blob Storage operations through the main storage-cli binary. -## Usage +**Note:** This is not a standalone CLI. Use the main `storage-cli` binary with `-s azurebs` flag to access Azure Blob Storage functionality. -Given a JSON config file (`config.json`)... +For general usage and build instructions, see the [main README](../README.md). + +## Azure-Specific Configuration + +The Azure client requires a JSON configuration file with the following structure: ``` json { "account_name": " (required)", "account_key": " (required)", "container_name": " (required)", - "environment": " (optional, default: 'AzureCloud')", + "environment": " (optional, default: 'AzureCloud')" } ``` +**Usage examples:** ``` bash -# Command: "put" -# Upload a blob to the blobstore. -./azurebs-cli -c config.json put - -# Command: "get" -# Fetch a blob from the blobstore. -# Destination file will be overwritten if exists. -./azurebs-cli -c config.json get - -# Command: "delete" -# Remove a blob from the blobstore. 
-./azurebs-cli -c config.json delete - -# Command: "exists" -# Checks if blob exists in the blobstore. -./azurebs-cli -c config.json exists - -# Command: "sign" -# Create a self-signed url for a blob in the blobstore. -./azurebs-cli -c config.json sign +# Upload a blob +storage-cli -s azurebs -c azure-config.json put local-file.txt remote-blob + +# Fetch a blob (destination file will be overwritten if exists) +storage-cli -s azurebs -c azure-config.json get remote-blob local-file.txt + +# Delete a blob +storage-cli -s azurebs -c azure-config.json delete remote-blob + +# Check if blob exists +storage-cli -s azurebs -c azure-config.json exists remote-blob + +# Generate a signed URL (e.g., GET for 3600 seconds) +storage-cli -s azurebs -c azure-config.json sign remote-blob get 3600s ``` -### Using signed urls with curl +### Using Signed URLs with curl ``` bash # Uploading a blob: -curl -X PUT -H "x-ms-blob-type: blockblob" -F 'fileX=' +curl -X PUT -H "x-ms-blob-type: blockblob" -F 'fileX=' # Downloading a blob: -curl -X GET +curl -X GET ``` -## Running Tests +## Testing ### Unit Tests -**Note:** Run the following commands from the repository root directory +Run from the repository root directory: -- Using ginkgo: - - ``` bash - go install github.com/onsi/ginkgo/v2/ginkgo - - ginkgo --skip-package=integration --randomize-all --cover -v -r ./azurebs/... - ``` - -- Using go test: +```bash +ginkgo --skip-package=integration --randomize-all --cover -v -r ./azurebs/... +``` - ``` bash - go test $(go list ./azurebs/... | grep -v integration) - ``` +Or using go test: +```bash +go test $(go list ./azurebs/... 
| grep -v integration) +``` ### Integration Tests - To run the integration tests with your existing container diff --git a/azurebs/client/client.go b/azurebs/client/client.go index 5021e6e..a40cb4b 100644 --- a/azurebs/client/client.go +++ b/azurebs/client/client.go @@ -29,7 +29,6 @@ func (client *AzBlobstore) Put(sourceFilePath string, dest string) error { if err != nil { return err } - defer source.Close() //nolint:errcheck md5, err := client.storageClient.Upload(source, dest) @@ -52,9 +51,14 @@ func (client *AzBlobstore) Put(sourceFilePath string, dest string) error { return nil } -func (client *AzBlobstore) Get(source string, dest *os.File) error { +func (client *AzBlobstore) Get(source string, dest string) error { + dstFile, err := os.Create(dest) + if err != nil { + log.Fatalln(err) + } + defer dstFile.Close() //nolint:errcheck - return client.storageClient.Download(source, dest) + return client.storageClient.Download(source, dstFile) } func (client *AzBlobstore) Delete(dest string) error { @@ -113,7 +117,7 @@ func (client *AzBlobstore) Properties(dest string) error { return client.storageClient.Properties(dest) } -func (client *AzBlobstore) EnsureContainerExists() error { +func (client *AzBlobstore) EnsureStorageExists() error { return client.storageClient.EnsureContainerExists() } diff --git a/azurebs/client/client_suite_test.go b/azurebs/client/client_suite_test.go index 79e004c..4fbfe94 100644 --- a/azurebs/client/client_suite_test.go +++ b/azurebs/client/client_suite_test.go @@ -9,5 +9,5 @@ import ( func TestClient(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Client Suite") + RunSpecs(t, "Azurebs Client Suite") } diff --git a/azurebs/client/client_test.go b/azurebs/client/client_test.go index 9d2df7b..851c6a9 100644 --- a/azurebs/client/client_test.go +++ b/azurebs/client/client_test.go @@ -79,15 +79,16 @@ var _ = Describe("Client", func() { azBlobstore, err := client.New(&storageClient) Expect(err).ToNot(HaveOccurred()) - file, _ := 
os.CreateTemp("", "tmpfile") //nolint:errcheck + dstFileName := "tmp-dest-azurebs-get" + defer os.Remove("tmp-dest-azurebs-get") //nolint:errcheck - azBlobstore.Get("source/blob", file) //nolint:errcheck + azBlobstore.Get("source/blob", dstFileName) //nolint:errcheck Expect(storageClient.DownloadCallCount()).To(Equal(1)) - source, dest := storageClient.DownloadArgsForCall(0) + source, dest := storageClient.DownloadArgsForCall(0) Expect(source).To(Equal("source/blob")) - Expect(dest).To(Equal(file)) + Expect(dest.Name()).To(Equal(dstFileName)) }) It("delete blob deletes the blob", func() { diff --git a/azurebs/config/config_suite_test.go b/azurebs/config/config_suite_test.go index c6e29ba..9761c11 100644 --- a/azurebs/config/config_suite_test.go +++ b/azurebs/config/config_suite_test.go @@ -9,5 +9,5 @@ import ( func TestConfig(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Config Suite") + RunSpecs(t, "Azurebs Config Suite") } diff --git a/azurebs/integration/assertions.go b/azurebs/integration/assertions.go index 7fb90bb..003c44a 100644 --- a/azurebs/integration/assertions.go +++ b/azurebs/integration/assertions.go @@ -5,10 +5,11 @@ import ( "os" "github.com/cloudfoundry/storage-cli/azurebs/config" - . 
"github.com/onsi/gomega" //nolint:staticcheck ) +var storageType = "azurebs" + func AssertPutUsesNoTimeout(cliPath string, cfg *config.AZStorageConfig) { cfg2 := *cfg cfg2.Timeout = "" // unset -> no timeout @@ -19,13 +20,13 @@ func AssertPutUsesNoTimeout(cliPath string, cfg *config.AZStorageConfig) { defer os.Remove(content) //nolint:errcheck blob := GenerateRandomString() - sess, err := RunCli(cliPath, configPath, "put", content, blob) + sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).To(BeZero()) Expect(string(sess.Err.Contents())).To(ContainSubstring("Uploading ")) // stderr has log.Println Expect(string(sess.Err.Contents())).To(ContainSubstring("with no timeout")) - sess, err = RunCli(cliPath, configPath, "delete", blob) + sess, err = RunCli(cliPath, configPath, storageType, "delete", blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).To(BeZero()) } @@ -40,12 +41,12 @@ func AssertPutHonorsCustomTimeout(cliPath string, cfg *config.AZStorageConfig) { defer os.Remove(content) //nolint:errcheck blob := GenerateRandomString() - sess, err := RunCli(cliPath, configPath, "put", content, blob) + sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).To(BeZero()) Expect(string(sess.Err.Contents())).To(ContainSubstring("with a timeout of 3s")) - sess, err = RunCli(cliPath, configPath, "delete", blob) + sess, err = RunCli(cliPath, configPath, storageType, "delete", blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).To(BeZero()) } @@ -62,7 +63,7 @@ func AssertPutTimesOut(cliPath string, cfg *config.AZStorageConfig) { defer os.Remove(content) //nolint:errcheck blob := GenerateRandomString() - sess, err := RunCli(cliPath, configPath, "put", content, blob) + sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) 
Expect(sess.ExitCode()).ToNot(BeZero()) Expect(string(sess.Err.Contents())).To(ContainSubstring("timeout of 1 reached while uploading")) @@ -78,7 +79,7 @@ func AssertInvalidTimeoutIsError(cliPath string, cfg *config.AZStorageConfig) { defer os.Remove(content) //nolint:errcheck blob := GenerateRandomString() - sess, err := RunCli(cliPath, configPath, "put", content, blob) + sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).ToNot(BeZero()) Expect(string(sess.Err.Contents())).To(ContainSubstring(`Invalid timeout format "bananas"`)) @@ -94,7 +95,7 @@ func AssertZeroTimeoutIsError(cliPath string, cfg *config.AZStorageConfig) { defer os.Remove(content) //nolint:errcheck blob := GenerateRandomString() - sess, err := RunCli(cliPath, configPath, "put", content, blob) + sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).ToNot(BeZero()) @@ -111,7 +112,7 @@ func AssertNegativeTimeoutIsError(cliPath string, cfg *config.AZStorageConfig) { defer os.Remove(content) //nolint:errcheck blob := GenerateRandomString() - sess, err := RunCli(cliPath, configPath, "put", content, blob) + sess, err := RunCli(cliPath, configPath, storageType, "put", content, blob) Expect(err).ToNot(HaveOccurred()) Expect(sess.ExitCode()).ToNot(BeZero()) @@ -122,26 +123,26 @@ func AssertSignedURLTimeouts(cliPath string, cfg *config.AZStorageConfig) { configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck - sess, err := RunCli(cliPath, configPath, "sign", "some-blob", "get", "60s") + sess, err := RunCli(cliPath, configPath, storageType, "sign", "some-blob", "get", "60s") Expect(err).ToNot(HaveOccurred()) url := string(sess.Out.Contents()) Expect(url).To(ContainSubstring("timeout=1800")) - sess, err = RunCli(cliPath, configPath, "sign", "some-blob", "put", "60s") + sess, err = RunCli(cliPath, configPath, storageType, 
"sign", "some-blob", "put", "60s") Expect(err).ToNot(HaveOccurred()) url = string(sess.Out.Contents()) Expect(url).To(ContainSubstring("timeout=2700")) } -func AssertEnsureBucketIdempotent(cliPath string, cfg *config.AZStorageConfig) { +func AssertEnsureStorageIdempotent(cliPath string, cfg *config.AZStorageConfig) { configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck - s1, err := RunCli(cliPath, configPath, "ensure-bucket-exists") + s1, err := RunCli(cliPath, configPath, storageType, "ensure-storage-exists") Expect(err).ToNot(HaveOccurred()) Expect(s1.ExitCode()).To(BeZero()) - s2, err := RunCli(cliPath, configPath, "ensure-bucket-exists") + s2, err := RunCli(cliPath, configPath, storageType, "ensure-storage-exists") Expect(err).ToNot(HaveOccurred()) Expect(s2.ExitCode()).To(BeZero()) } @@ -155,7 +156,7 @@ func AssertPutGetWithSpecialNames(cliPath string, cfg *config.AZStorageConfig) { f := MakeContentFile(content) defer os.Remove(f) //nolint:errcheck - s, err := RunCli(cliPath, configPath, "put", f, name) + s, err := RunCli(cliPath, configPath, storageType, "put", f, name) Expect(err).ToNot(HaveOccurred()) Expect(s.ExitCode()).To(BeZero()) @@ -163,14 +164,14 @@ func AssertPutGetWithSpecialNames(cliPath string, cfg *config.AZStorageConfig) { tmp.Close() //nolint:errcheck defer os.Remove(tmp.Name()) //nolint:errcheck - s, err = RunCli(cliPath, configPath, "get", name, tmp.Name()) + s, err = RunCli(cliPath, configPath, storageType, "get", name, tmp.Name()) Expect(err).ToNot(HaveOccurred()) Expect(s.ExitCode()).To(BeZero()) b, _ := os.ReadFile(tmp.Name()) //nolint:errcheck Expect(string(b)).To(Equal(content)) - s, err = RunCli(cliPath, configPath, "delete", name) + s, err = RunCli(cliPath, configPath, storageType, "delete", name) Expect(err).ToNot(HaveOccurred()) Expect(s.ExitCode()).To(BeZero()) } @@ -186,21 +187,21 @@ func AssertLifecycleWorks(cliPath string, cfg *config.AZStorageConfig) { defer os.Remove(contentFile) //nolint:errcheck 
// Ensure container/bucket exists - cliSession, err := RunCli(cliPath, configPath, "ensure-bucket-exists") + cliSession, err := RunCli(cliPath, configPath, storageType, "ensure-storage-exists") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = RunCli(cliPath, configPath, "exists", blobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) Expect(cliSession.Err.Contents()).To(MatchRegexp("File '.*' exists in bucket '.*'")) // Check blob properties - cliSession, err = RunCli(cliPath, configPath, "properties", blobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "properties", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) output := string(cliSession.Out.Contents()) @@ -214,7 +215,7 @@ func AssertLifecycleWorks(cliPath string, cfg *config.AZStorageConfig) { Expect(err).ToNot(HaveOccurred()) defer os.Remove(tmpLocalFile.Name()) //nolint:errcheck - cliSession, err = RunCli(cliPath, configPath, "get", blobName, tmpLocalFile.Name()) + cliSession, err = RunCli(cliPath, configPath, storageType, "get", blobName, tmpLocalFile.Name()) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -222,16 +223,16 @@ func AssertLifecycleWorks(cliPath string, cfg *config.AZStorageConfig) { Expect(err).ToNot(HaveOccurred()) Expect(string(gottenBytes)).To(Equal(expectedString)) - cliSession, err = RunCli(cliPath, configPath, "delete", blobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, 
err = RunCli(cliPath, configPath, "exists", blobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(3)) Expect(cliSession.Err.Contents()).To(MatchRegexp("File '.*' does not exist in bucket '.*'")) - cliSession, err = RunCli(cliPath, configPath, "properties", blobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "properties", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(0)) Expect(cliSession.Out.Contents()).To(MatchRegexp("{}")) @@ -241,7 +242,7 @@ func AssertOnCliVersion(cliPath string, cfg *config.AZStorageConfig) { configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck - cliSession, err := RunCli(cliPath, configPath, "-v") + cliSession, err := RunCli(cliPath, configPath, storageType, "-v") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(0)) @@ -253,7 +254,7 @@ func AssertGetNonexistentFails(cliPath string, cfg *config.AZStorageConfig) { configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck - cliSession, err := RunCli(cliPath, configPath, "get", "non-existent-file", "/dev/null") + cliSession, err := RunCli(cliPath, configPath, storageType, "get", "non-existent-file", "/dev/null") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).ToNot(BeZero()) } @@ -262,7 +263,7 @@ func AssertDeleteNonexistentWorks(cliPath string, cfg *config.AZStorageConfig) { configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck - cliSession, err := RunCli(cliPath, configPath, "delete", "non-existent-file") + cliSession, err := RunCli(cliPath, configPath, storageType, "delete", "non-existent-file") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) } @@ -273,13 +274,13 @@ func AssertOnSignedURLs(cliPath string, cfg *config.AZStorageConfig) { regex := "https://" + cfg.AccountName + ".blob.*/" + 
cfg.ContainerName + "/some-blob.*" - cliSession, err := RunCli(cliPath, configPath, "sign", "some-blob", "get", "60s") + cliSession, err := RunCli(cliPath, configPath, storageType, "sign", "some-blob", "get", "60s") Expect(err).ToNot(HaveOccurred()) getUrl := bytes.NewBuffer(cliSession.Out.Contents()).String() Expect(getUrl).To(MatchRegexp(regex)) - cliSession, err = RunCli(cliPath, configPath, "sign", "some-blob", "put", "60s") + cliSession, err = RunCli(cliPath, configPath, storageType, "sign", "some-blob", "put", "60s") Expect(err).ToNot(HaveOccurred()) putUrl := bytes.NewBuffer(cliSession.Out.Contents()).String() @@ -290,10 +291,10 @@ func AssertOnListDeleteLifecyle(cliPath string, cfg *config.AZStorageConfig) { configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck - cli, err := RunCli(cliPath, configPath, "delete-recursive", "") + cli, err := RunCli(cliPath, configPath, storageType, "delete-recursive", "") Expect(err).ToNot(HaveOccurred()) Expect(cli.ExitCode()).To(BeZero()) - cliSession, err := RunCli(cliPath, configPath, "list") + cliSession, err := RunCli(cliPath, configPath, storageType, "list") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -308,41 +309,41 @@ func AssertOnListDeleteLifecyle(cliPath string, cfg *config.AZStorageConfig) { CreateRandomBlobs(cliPath, cfg, 2, otherPrefix) // Assert that the blobs are listed correctly - cliSession, err = RunCli(cliPath, configPath, "list") + cliSession, err = RunCli(cliPath, configPath, storageType, "list") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) Expect(len(bytes.FieldsFunc(cliSession.Out.Contents(), func(r rune) bool { return r == '\n' || r == '\r' }))).To(BeNumerically("==", 10)) // Assert that the all blobs with custom prefix are listed correctly - cliSession, err = RunCli(cliPath, configPath, "list", customPrefix) + cliSession, err = RunCli(cliPath, configPath, storageType, "list", customPrefix) 
Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) Expect(len(bytes.FieldsFunc(cliSession.Out.Contents(), func(r rune) bool { return r == '\n' || r == '\r' }))).To(BeNumerically("==", 4)) // Delete all blobs with custom prefix - cliSession, err = RunCli(cliPath, configPath, "delete-recursive", customPrefix) + cliSession, err = RunCli(cliPath, configPath, storageType, "delete-recursive", customPrefix) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) // Assert that the blobs with custom prefix are deleted - cliSession, err = RunCli(cliPath, configPath, "list", customPrefix) + cliSession, err = RunCli(cliPath, configPath, storageType, "list", customPrefix) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) Expect(len(cliSession.Out.Contents())).To(BeZero()) // Assert that the other prefixed blobs are still listed - cliSession, err = RunCli(cliPath, configPath, "list", otherPrefix) + cliSession, err = RunCli(cliPath, configPath, storageType, "list", otherPrefix) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) Expect(len(bytes.FieldsFunc(cliSession.Out.Contents(), func(r rune) bool { return r == '\n' || r == '\r' }))).To(BeNumerically("==", 2)) // Delete all other blobs - cliSession, err = RunCli(cliPath, configPath, "delete-recursive", "") + cliSession, err = RunCli(cliPath, configPath, storageType, "delete-recursive", "") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) // Assert that all blobs are deleted - cliSession, err = RunCli(cliPath, configPath, "list") + cliSession, err = RunCli(cliPath, configPath, storageType, "list") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) Expect(len(cliSession.Out.Contents())).To(BeZero()) @@ -358,18 +359,18 @@ func AssertOnCopy(cliPath string, cfg *config.AZStorageConfig) { contentFile := MakeContentFile(blobContent) defer os.Remove(contentFile) //nolint:errcheck - 
cliSession, err := RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) // Copy the blob to a new name copiedBlobName := GenerateRandomString() - cliSession, err = RunCli(cliPath, configPath, "copy", blobName, copiedBlobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "copy", blobName, copiedBlobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) // Assert that the copied blob exists - cliSession, err = RunCli(cliPath, configPath, "exists", copiedBlobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "exists", copiedBlobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) @@ -379,7 +380,7 @@ func AssertOnCopy(cliPath string, cfg *config.AZStorageConfig) { err = tmpLocalFile.Close() Expect(err).ToNot(HaveOccurred()) defer os.Remove(tmpLocalFile.Name()) //nolint:errcheck - cliSession, err = RunCli(cliPath, configPath, "get", blobName, tmpLocalFile.Name()) + cliSession, err = RunCli(cliPath, configPath, storageType, "get", blobName, tmpLocalFile.Name()) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) gottenBytes, err := os.ReadFile(tmpLocalFile.Name()) @@ -387,10 +388,10 @@ func AssertOnCopy(cliPath string, cfg *config.AZStorageConfig) { Expect(string(gottenBytes)).To(Equal(blobContent)) // Clean up - cliSession, err = RunCli(cliPath, configPath, "delete", blobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = RunCli(cliPath, configPath, "delete", copiedBlobName) + cliSession, err = RunCli(cliPath, configPath, storageType, "delete", copiedBlobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) } @@ -407,7 +408,7 @@ 
func CreateRandomBlobs(cliPath string, cfg *config.AZStorageConfig, count int, p contentFile := MakeContentFile(GenerateRandomString()) defer os.Remove(contentFile) //nolint:errcheck - cliSession, err := RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) } diff --git a/azurebs/integration/general_azure_test.go b/azurebs/integration/general_azure_test.go index bff36b7..c0ffa1d 100644 --- a/azurebs/integration/general_azure_test.go +++ b/azurebs/integration/general_azure_test.go @@ -13,6 +13,7 @@ import ( var _ = Describe("General testing for all Azure regions", func() { var defaultConfig config.AZStorageConfig + storageType := "azurebs" BeforeEach(func() { defaultConfig = config.AZStorageConfig{ @@ -62,7 +63,7 @@ var _ = Describe("General testing for all Azure regions", func() { configurations, ) DescribeTable("Assert Ensure Bucket Idempotent", - func(cfg *config.AZStorageConfig) { integration.AssertEnsureBucketIdempotent(cliPath, cfg) }, + func(cfg *config.AZStorageConfig) { integration.AssertEnsureStorageIdempotent(cliPath, cfg) }, configurations, ) DescribeTable("Assert Put Get With Special Names", @@ -113,16 +114,16 @@ var _ = Describe("General testing for all Azure regions", func() { It("uploads a file", func() { defer func() { - cliSession, err := integration.RunCli(cliPath, configPath, "delete", blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) }() - cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = 
integration.RunCli(cliPath, configPath, "exists", blobName) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "exists", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) Expect(string(cliSession.Err.Contents())).To(MatchRegexp("File '" + blobName + "' exists in bucket '" + defaultConfig.ContainerName + "'")) @@ -130,37 +131,36 @@ var _ = Describe("General testing for all Azure regions", func() { It("overwrites an existing file", func() { defer func() { - cliSession, err := integration.RunCli(cliPath, configPath, "delete", blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "delete", blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) }() - tmpLocalFile, _ := os.CreateTemp("", "azure-storage-cli-download") //nolint:errcheck - tmpLocalFile.Close() //nolint:errcheck - os.Remove(tmpLocalFile.Name()) //nolint:errcheck + tmpLocalFileName := "azure-storage-cli-download" + defer os.Remove(tmpLocalFileName) //nolint:errcheck contentFile = integration.MakeContentFile("initial content") - cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = integration.RunCli(cliPath, configPath, "get", blobName, tmpLocalFile.Name()) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "get", blobName, tmpLocalFileName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - gottenBytes, _ := os.ReadFile(tmpLocalFile.Name()) //nolint:errcheck + gottenBytes, _ := os.ReadFile(tmpLocalFileName) //nolint:errcheck Expect(string(gottenBytes)).To(Equal("initial content")) contentFile = integration.MakeContentFile("updated content") - cliSession, err = integration.RunCli(cliPath, configPath, "put", 
contentFile, blobName) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - cliSession, err = integration.RunCli(cliPath, configPath, "get", blobName, tmpLocalFile.Name()) + cliSession, err = integration.RunCli(cliPath, configPath, storageType, "get", blobName, tmpLocalFileName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) - gottenBytes, _ = os.ReadFile(tmpLocalFile.Name()) //nolint:errcheck + gottenBytes, _ = os.ReadFile(tmpLocalFileName) //nolint:errcheck Expect(string(gottenBytes)).To(Equal("updated content")) }) @@ -173,7 +173,7 @@ var _ = Describe("General testing for all Azure regions", func() { configPath = integration.MakeConfigFile(cfg) - cliSession, err := integration.RunCli(cliPath, configPath, "put", contentFile, blobName) + cliSession, err := integration.RunCli(cliPath, configPath, storageType, "put", contentFile, blobName) Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(Equal(1)) diff --git a/azurebs/integration/integration_suite_test.go b/azurebs/integration/integration_suite_test.go index 5b30b75..509a115 100644 --- a/azurebs/integration/integration_suite_test.go +++ b/azurebs/integration/integration_suite_test.go @@ -20,7 +20,7 @@ var largeContent string //nolint:unused var _ = BeforeSuite(func() { if len(cliPath) == 0 { var err error - cliPath, err = gexec.Build("github.com/cloudfoundry/storage-cli/azurebs") + cliPath, err = gexec.Build("github.com/cloudfoundry/storage-cli") Expect(err).ShouldNot(HaveOccurred()) } }) diff --git a/azurebs/integration/utils.go b/azurebs/integration/utils.go index 1977221..c845ff1 100644 --- a/azurebs/integration/utils.go +++ b/azurebs/integration/utils.go @@ -51,10 +51,12 @@ func MakeContentFile(content string) string { return tmpFile.Name() } -func RunCli(cliPath string, configPath string, subcommand string, args ...string) 
(*gexec.Session, error) { +func RunCli(cliPath string, configPath string, storageType string, subcommand string, args ...string) (*gexec.Session, error) { cmdArgs := []string{ "-c", configPath, + "-s", + storageType, subcommand, } cmdArgs = append(cmdArgs, args...) diff --git a/azurebs/main.go b/azurebs/main.go deleted file mode 100644 index 698fc15..0000000 --- a/azurebs/main.go +++ /dev/null @@ -1,197 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "time" - - "github.com/cloudfoundry/storage-cli/azurebs/client" - "github.com/cloudfoundry/storage-cli/azurebs/config" -) - -var version string - -func main() { - - configPath := flag.String("c", "", "configuration path") - showVer := flag.Bool("v", false, "version") - flag.Parse() - - if *showVer { - fmt.Printf("version %s\n", version) - os.Exit(0) - } - - configFile, err := os.Open(*configPath) - if err != nil { - log.Fatalln(err) - } - - azConfig, err := config.NewFromReader(configFile) - if err != nil { - log.Fatalln(err) - } - - storageClient, err := client.NewStorageClient(azConfig) - if err != nil { - log.Fatalln(err) - } - - blobstoreClient, err := client.New(storageClient) - if err != nil { - log.Fatalln(err) - } - - nonFlagArgs := flag.Args() - cmd := nonFlagArgs[0] - - switch cmd { - case "put": - if len(nonFlagArgs) != 3 { - log.Fatalf("Put method expected 3 arguments got %d\n", len(nonFlagArgs)) - } - sourceFilePath, dst := nonFlagArgs[1], nonFlagArgs[2] - - _, err := os.Stat(sourceFilePath) - if err != nil { - log.Fatalln(err) - } - - err = blobstoreClient.Put(sourceFilePath, dst) - fatalLog(cmd, err) - - case "get": - if len(nonFlagArgs) != 3 { - log.Fatalf("Get method expected 3 arguments got %d\n", len(nonFlagArgs)) - } - src, dst := nonFlagArgs[1], nonFlagArgs[2] - - var dstFile *os.File - dstFile, err = os.Create(dst) - if err != nil { - log.Fatalln(err) - } - - defer dstFile.Close() //nolint:errcheck - - err = blobstoreClient.Get(src, dstFile) - fatalLog(cmd, err) - - case 
"copy": - if len(nonFlagArgs) != 3 { - log.Fatalf("Get method expected 3 arguments got %d\n", len(nonFlagArgs)) - } - - srcBlob, dstBlob := nonFlagArgs[1], nonFlagArgs[2] - - err = blobstoreClient.Copy(srcBlob, dstBlob) - fatalLog(cmd, err) - - case "delete": - if len(nonFlagArgs) != 2 { - log.Fatalf("Delete method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - - err = blobstoreClient.Delete(nonFlagArgs[1]) - fatalLog(cmd, err) - - case "delete-recursive": - var prefix string - if len(nonFlagArgs) > 2 { - log.Fatalf("delete-recursive takes at most one argument (prefix) got %d\n", len(nonFlagArgs)-1) - } else if len(nonFlagArgs) == 2 { - prefix = nonFlagArgs[1] - } else { - prefix = "" - } - err = blobstoreClient.DeleteRecursive(prefix) - fatalLog("delete-recursive", err) - - case "exists": - if len(nonFlagArgs) != 2 { - log.Fatalf("Exists method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - - var exists bool - exists, err = blobstoreClient.Exists(nonFlagArgs[1]) - - // If the object exists the exit status is 0, otherwise it is 3 - // We are using `3` since `1` and `2` have special meanings - if err == nil && !exists { - os.Exit(3) - } - - case "sign": - if len(nonFlagArgs) != 4 { - log.Fatalf("Sign method expects 3 arguments got %d\n", len(nonFlagArgs)-1) - } - - objectID, action := nonFlagArgs[1], nonFlagArgs[2] - - if action != "get" && action != "put" { - log.Fatalf("Action not implemented: %s. Available actions are 'get' and 'put'", action) - } - - expiration, err := time.ParseDuration(nonFlagArgs[3]) - if err != nil { - log.Fatalf("Expiration should be in the format of a duration i.e. 1h, 60m, 3600s. 
Got: %s", nonFlagArgs[3]) - } - - signedURL, err := blobstoreClient.Sign(objectID, action, expiration) - - if err != nil { - log.Fatalf("Failed to sign request: %s", err) - } - - fmt.Print(signedURL) - os.Exit(0) - - case "list": - var prefix string - - if len(nonFlagArgs) == 1 { - prefix = "" - } else if len(nonFlagArgs) == 2 { - prefix = nonFlagArgs[1] - } else { - log.Fatalf("List method expected 1 or 2 arguments, got %d\n", len(nonFlagArgs)-1) - } - - var objects []string - objects, err = blobstoreClient.List(prefix) - if err != nil { - log.Fatalf("Failed to list objects: %s", err) - } - - for _, object := range objects { - fmt.Println(object) - } - - case "properties": - if len(nonFlagArgs) != 2 { - log.Fatalf("Properties method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - - err = blobstoreClient.Properties(nonFlagArgs[1]) - fatalLog("properties", err) - - case "ensure-bucket-exists": - if len(nonFlagArgs) != 1 { - log.Fatalf("EnsureBucketExists method expected 1 arguments got %d\n", len(nonFlagArgs)) - } - - err = blobstoreClient.EnsureContainerExists() - fatalLog("ensure-bucket-exists", err) - - default: - log.Fatalf("unknown command: '%s'\n", cmd) - } -} - -func fatalLog(cmd string, err error) { - if err != nil { - log.Fatalf("performing operation %s: %s\n", cmd, err) - } -} diff --git a/dav/README.md b/dav/README.md index 8601a09..1641195 100644 --- a/dav/README.md +++ b/dav/README.md @@ -1,36 +1,49 @@ -# Dav Storage CLI +# WebDAV Client -A CLI utility the BOSH Agent uses for accessing the [DAV blobstore](https://bosh.io/docs/director-configure-blobstore.html). +WebDAV client implementation for the unified storage-cli tool. This module provides WebDAV blobstore operations through the main storage-cli binary. -Inside stemcells this binary is on the PATH as `bosh-blobstore-dav`. +**Note:** This is not a standalone CLI. Use the main `storage-cli` binary with `-s dav` flag to access DAV functionality. 
-### Developers +For general usage and build instructions, see the [main README](../README.md). -To update dependencies, use `gvt update`. Here is a typical invocation to update the `bosh-utils` dependency: +## DAV-Specific Configuration -``` -gvt update github.com/cloudfoundry/bosh-utils -``` +The DAV client requires a JSON configuration file with WebDAV endpoint details and credentials. -### Run tests +**Usage examples:** +```bash +# Upload an object +storage-cli -s dav -c dav-config.json put local-file.txt remote-object -You can run the unit test with `ginkgo` as follows. +# Fetch an object +storage-cli -s dav -c dav-config.json get remote-object local-file.txt -``` -ginkgo -r -race -progress -mod vendor . -``` +# Delete an object +storage-cli -s dav -c dav-config.json delete remote-object + +# Check if an object exists +storage-cli -s dav -c dav-config.json exists remote-object -# Pre-signed URLs +# Generate a signed URL (e.g., GET for 60 seconds) +storage-cli -s dav -c dav-config.json sign remote-object get 60s +``` -The command `sign` generates a pre-signed url for a specific object, action and duration: +## Pre-signed URLs -`dav-cli ` +The `sign` command generates a pre-signed URL for a specific object, action, and duration. -The request will be signed using HMAC-SHA256 with a secret provided in configuration. +The request is signed using HMAC-SHA256 with a secret provided in the configuration. The HMAC format is: `` -The generated URL will be of format: - +The generated URL format: `https://blobstore.url/signed/object-id?st=HMACSignatureHash&ts=GenerationTimestamp&e=ExpirationTimestamp` + +## Testing + +### Unit Tests +Run unit tests from the repository root: +```bash +ginkgo --cover -v -r ./dav/... 
+``` diff --git a/dav/app/app.go b/dav/app/app.go index 523e446..dfbe1d8 100644 --- a/dav/app/app.go +++ b/dav/app/app.go @@ -1,12 +1,9 @@ package app import ( - "encoding/json" "errors" - "flag" "fmt" - "io" - "os" + "time" davcmd "github.com/cloudfoundry/storage-cli/dav/cmd" davconfig "github.com/cloudfoundry/storage-cli/dav/config" @@ -14,59 +11,70 @@ import ( type App struct { runner davcmd.Runner + config davconfig.Config } -func New(runner davcmd.Runner) (app App) { - app.runner = runner - return +func New(r davcmd.Runner, c davconfig.Config) *App { + app := &App{runner: r, config: c} + return app } -func (app App) Run(args []string) (err error) { - args = args[1:] - var configFilePath string - var printVersion bool - - flagSet := flag.NewFlagSet("davcli-args", flag.ContinueOnError) - flagSet.StringVar(&configFilePath, "c", "", "Config file path") - flagSet.BoolVar(&printVersion, "v", false, "print version info") +func (app *App) run(args []string) (err error) { - err = flagSet.Parse(args) + err = app.runner.SetConfig(app.config) if err != nil { + err = fmt.Errorf("Invalid CA Certificate: %s", err.Error()) //nolint:staticcheck return } - if printVersion { - fmt.Println("davcli version [[version]]") - return - } + err = app.runner.Run(args) + return +} - if configFilePath == "" { - err = errors.New("Config file arg `-c` is missing") //nolint:staticcheck - return - } +func (app *App) Put(sourceFilePath string, destinationObject string) error { + return app.run([]string{"put", sourceFilePath, destinationObject}) +} - file, err := os.Open(configFilePath) - if err != nil { - return - } +func (app *App) Get(sourceObject string, dest string) error { + return app.run([]string{"get", sourceObject, dest}) +} - configBytes, err := io.ReadAll(file) - if err != nil { - return - } +func (app *App) Delete(object string) error { + return app.run([]string{"delete", object}) +} - config := davconfig.Config{} - err = json.Unmarshal(configBytes, &config) +func (app *App) 
Exists(object string) (bool, error) { + err := app.run([]string{"exists", object}) if err != nil { - return + return false, err } + return true, nil +} - err = app.runner.SetConfig(config) +func (app *App) Sign(object string, action string, expiration time.Duration) (string, error) { + err := app.run([]string{"sign", object, action, expiration.String()}) if err != nil { - err = fmt.Errorf("Invalid CA Certificate: %s", err.Error()) //nolint:staticcheck - return + return "", err } + return "", nil +} - err = app.runner.Run(args[2:]) - return +func (app *App) List(prefix string) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (app *App) Copy(srcBlob string, dstBlob string) error { + return errors.New("not implemented") +} + +func (app *App) Properties(dest string) error { + return errors.New("not implemented") +} + +func (app *App) EnsureStorageExists() error { + return errors.New("not implemented") +} + +func (app *App) DeleteRecursive(prefix string) error { + return errors.New("not implemented") } diff --git a/dav/app/app_suite_test.go b/dav/app/app_suite_test.go index c352179..e4657e2 100644 --- a/dav/app/app_suite_test.go +++ b/dav/app/app_suite_test.go @@ -1,7 +1,7 @@ package app_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "testing" @@ -9,5 +9,5 @@ import ( func TestApp(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Davcli App Suite") + RunSpecs(t, "Dav App Suite") } diff --git a/dav/app/app_test.go b/dav/app/app_test.go index 39696d8..71d00c2 100644 --- a/dav/app/app_test.go +++ b/dav/app/app_test.go @@ -4,8 +4,9 @@ import ( "errors" "os" "path/filepath" + "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . 
"github.com/cloudfoundry/storage-cli/dav/app" @@ -42,11 +43,15 @@ func pathToFixture(file string) string { } var _ = Describe("App", func() { + It("reads the CA cert from config", func() { - runner := &FakeRunner{} + configFile, _ := os.Open(pathToFixture("dav-cli-config-with-ca.json")) //nolint:errcheck + defer configFile.Close() //nolint:errcheck + davConfig, _ := davconf.NewFromReader(configFile) //nolint:errcheck - app := New(runner) - err := app.Run([]string{"dav-cli", "-c", pathToFixture("dav-cli-config-with-ca.json"), "put", "localFile", "remoteFile"}) + runner := &FakeRunner{} + app := New(runner, davConfig) + err := app.Put("localFile", "remoteFile") Expect(err).ToNot(HaveOccurred()) expectedConfig := davconf.Config{ @@ -66,22 +71,30 @@ var _ = Describe("App", func() { }) It("returns error if CA Cert is invalid", func() { + configFile, _ := os.Open(pathToFixture("dav-cli-config-with-ca.json")) //nolint:errcheck + defer configFile.Close() //nolint:errcheck + davConfig, _ := davconf.NewFromReader(configFile) //nolint:errcheck + runner := &FakeRunner{ SetConfigErr: errors.New("invalid cert"), } - app := New(runner) - err := app.Run([]string{"dav-cli", "-c", pathToFixture("dav-cli-config-with-ca.json"), "put", "localFile", "remoteFile"}) + app := New(runner, davConfig) + err := app.Put("localFile", "remoteFile") Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("Invalid CA Certificate: invalid cert")) }) It("runs the put command", func() { + configFile, _ := os.Open(pathToFixture("dav-cli-config.json")) //nolint:errcheck + defer configFile.Close() //nolint:errcheck + davConfig, _ := davconf.NewFromReader(configFile) //nolint:errcheck + runner := &FakeRunner{} - app := New(runner) - err := app.Run([]string{"dav-cli", "-c", pathToFixture("dav-cli-config.json"), "put", "localFile", "remoteFile"}) + app := New(runner, davConfig) + err := app.Put("localFile", "remoteFile") Expect(err).ToNot(HaveOccurred()) expectedConfig := davconf.Config{ @@ 
-96,29 +109,58 @@ var _ = Describe("App", func() { Expect(runner.RunArgs).To(Equal([]string{"put", "localFile", "remoteFile"})) }) - It("returns error with no config argument", func() { - runner := &FakeRunner{} + It("returns error from the cmd runner", func() { - app := New(runner) - err := app.Run([]string{"put", "localFile", "remoteFile"}) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("Config file arg `-c` is missing")) - }) - It("prints the version info with the -v flag", func() { - runner := &FakeRunner{} - app := New(runner) - err := app.Run([]string{"dav-cli", "-v"}) - Expect(err).ToNot(HaveOccurred()) - }) + configFile, _ := os.Open(pathToFixture("dav-cli-config.json")) //nolint:errcheck + defer configFile.Close() //nolint:errcheck + davConfig, _ := davconf.NewFromReader(configFile) //nolint:errcheck - It("returns error from the cmd runner", func() { runner := &FakeRunner{ RunErr: errors.New("fake-run-error"), } - app := New(runner) - err := app.Run([]string{"dav-cli", "-c", pathToFixture("dav-cli-config.json"), "put", "localFile", "remoteFile"}) + app := New(runner, davConfig) + err := app.Put("localFile", "remoteFile") Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("fake-run-error")) }) + + Context("Checking functionalities", func() { + // var app *App + var davConfig davconf.Config + BeforeEach(func() { + + configFile, _ := os.Open(pathToFixture("dav-cli-config.json")) //nolint:errcheck + defer configFile.Close() //nolint:errcheck + davConfig, _ = davconf.NewFromReader(configFile) //nolint:errcheck + }) + + It("Exists fails", func() { + + runner := &FakeRunner{ + RunErr: errors.New("object does not exist"), + } + app := New(runner, davConfig) + + exist, err := app.Exists("someObject") //nolint:errcheck + + Expect(err.Error()).To(ContainSubstring("object does not exist")) + Expect(exist).To(BeFalse()) + + }) + + It("Sign Fails", func() { + runner := &FakeRunner{ + RunErr: errors.New("can't sign"), + } + 
+ app := New(runner, davConfig) + signedurl, err := app.Sign("someObject", "SomeObject", time.Second*100) + Expect(signedurl).To(BeEmpty()) + Expect(err.Error()).To(ContainSubstring("can't sign")) + + }) + + }) + }) diff --git a/dav/client/client_suite_test.go b/dav/client/client_suite_test.go index a904d86..95b3f42 100644 --- a/dav/client/client_suite_test.go +++ b/dav/client/client_suite_test.go @@ -1,7 +1,7 @@ package client_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "testing" @@ -9,5 +9,5 @@ import ( func TestClient(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Davcli Client Suite") + RunSpecs(t, "Dav Client Suite") } diff --git a/dav/client/client_test.go b/dav/client/client_test.go index c210e55..a26eab8 100644 --- a/dav/client/client_test.go +++ b/dav/client/client_test.go @@ -5,7 +5,7 @@ import ( "net/http" "strings" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/onsi/gomega/ghttp" diff --git a/dav/cmd/cmd_suite_test.go b/dav/cmd/cmd_suite_test.go index f960f5b..8d36bcd 100644 --- a/dav/cmd/cmd_suite_test.go +++ b/dav/cmd/cmd_suite_test.go @@ -1,7 +1,7 @@ package cmd_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "testing" @@ -9,5 +9,5 @@ import ( func TestCmd(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Davcli Cmd Suite") + RunSpecs(t, "Dav Cmd Suite") } diff --git a/dav/cmd/delete_test.go b/dav/cmd/delete_test.go index 3b230ce..912c68b 100644 --- a/dav/cmd/delete_test.go +++ b/dav/cmd/delete_test.go @@ -4,7 +4,7 @@ import ( "net/http" "net/http/httptest" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . 
"github.com/cloudfoundry/storage-cli/dav/cmd" diff --git a/dav/cmd/exists_test.go b/dav/cmd/exists_test.go index e5d11d8..0d01ce7 100644 --- a/dav/cmd/exists_test.go +++ b/dav/cmd/exists_test.go @@ -4,7 +4,7 @@ import ( "net/http" "net/http/httptest" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" boshlog "github.com/cloudfoundry/bosh-utils/logger" diff --git a/dav/cmd/factory_test.go b/dav/cmd/factory_test.go index 4caf61a..46378a6 100644 --- a/dav/cmd/factory_test.go +++ b/dav/cmd/factory_test.go @@ -3,7 +3,7 @@ package cmd_test import ( "reflect" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" boshlog "github.com/cloudfoundry/bosh-utils/logger" diff --git a/dav/cmd/get_test.go b/dav/cmd/get_test.go index c3d7008..0ab58a7 100644 --- a/dav/cmd/get_test.go +++ b/dav/cmd/get_test.go @@ -7,7 +7,7 @@ import ( "os" "path/filepath" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" boshlog "github.com/cloudfoundry/bosh-utils/logger" diff --git a/dav/cmd/put_test.go b/dav/cmd/put_test.go index 0c234ec..f7af661 100644 --- a/dav/cmd/put_test.go +++ b/dav/cmd/put_test.go @@ -7,7 +7,7 @@ import ( "os" "path/filepath" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" boshlog "github.com/cloudfoundry/bosh-utils/logger" diff --git a/dav/cmd/runner_test.go b/dav/cmd/runner_test.go index df65651..2087b1a 100644 --- a/dav/cmd/runner_test.go +++ b/dav/cmd/runner_test.go @@ -3,7 +3,7 @@ package cmd_test import ( "errors" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/cloudfoundry/storage-cli/dav/cmd" diff --git a/dav/cmd/sign_test.go b/dav/cmd/sign_test.go index f49731a..09a570d 100644 --- a/dav/cmd/sign_test.go +++ b/dav/cmd/sign_test.go @@ -5,7 +5,7 @@ import ( "io" "os" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . 
"github.com/cloudfoundry/storage-cli/dav/cmd" diff --git a/dav/cmd/testing/testing_suite_test.go b/dav/cmd/testing/testing_suite_test.go index eb53406..e1ac225 100644 --- a/dav/cmd/testing/testing_suite_test.go +++ b/dav/cmd/testing/testing_suite_test.go @@ -1,7 +1,7 @@ package testing_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "testing" @@ -9,5 +9,5 @@ import ( func TestTesting(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Davcli Testing Suite") + RunSpecs(t, "Dav Testing Suite") } diff --git a/dav/config/config.go b/dav/config/config.go index 31c637b..40711ac 100644 --- a/dav/config/config.go +++ b/dav/config/config.go @@ -1,5 +1,10 @@ package config +import ( + "encoding/json" + "io" +) + type Config struct { User string Password string @@ -16,3 +21,19 @@ type TLS struct { type Cert struct { CA string } + +func NewFromReader(reader io.Reader) (Config, error) { + config := Config{} + + configBytes, err := io.ReadAll(reader) + if err != nil { + return config, err + } + + err = json.Unmarshal(configBytes, &config) + if err != nil { + return config, err + } + + return config, nil +} diff --git a/dav/config/config_suite_test.go b/dav/config/config_suite_test.go index dd39ef7..0214658 100644 --- a/dav/config/config_suite_test.go +++ b/dav/config/config_suite_test.go @@ -1,7 +1,7 @@ package config_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "testing" @@ -9,5 +9,5 @@ import ( func TestConfig(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Davcli Config Suite") + RunSpecs(t, "Dav Config Suite") } diff --git a/dav/main/dav.go b/dav/main/dav.go deleted file mode 100644 index 6f1822a..0000000 --- a/dav/main/dav.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - boshlog "github.com/cloudfoundry/bosh-utils/logger" - "github.com/cloudfoundry/storage-cli/dav/app" - "github.com/cloudfoundry/storage-cli/dav/cmd" -) - -func main() { - logger := boshlog.NewLogger(boshlog.LevelNone) - cmdFactory := cmd.NewFactory(logger) - - cmdRunner := cmd.NewRunner(cmdFactory) - - cli := app.New(cmdRunner) - - err := cli.Run(os.Args) - if err != nil { - if strings.Contains(err.Error(), "not found") { - fmt.Printf("Blob not found - %s", err.Error()) - os.Exit(3) - } - fmt.Printf("Error running app - %s", err.Error()) - os.Exit(1) - } -} diff --git a/dav/main/main_suite_test.go b/dav/main/main_suite_test.go deleted file mode 100644 index 92223fd..0000000 --- a/dav/main/main_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package main_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "testing" -) - -func TestMain(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Davcli Main Suite") -} diff --git a/dav/signer/signer_suite_test.go b/dav/signer/signer_suite_test.go index 4e8df40..8ffc144 100644 --- a/dav/signer/signer_suite_test.go +++ b/dav/signer/signer_suite_test.go @@ -1,7 +1,7 @@ package signer_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "testing" @@ -9,5 +9,5 @@ import ( func TestSigner(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "DavCli Signer Suite") + RunSpecs(t, "Dav Signer Suite") } diff --git a/dav/signer/signer_test.go b/dav/signer/signer_test.go index d76264a..197a55d 100644 --- a/dav/signer/signer_test.go +++ b/dav/signer/signer_test.go @@ -4,7 +4,7 @@ import ( "time" "github.com/cloudfoundry/storage-cli/dav/signer" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/gcs/README.md b/gcs/README.md index a44e021..93f8821 100644 --- a/gcs/README.md +++ b/gcs/README.md @@ -1,45 +1,16 @@ -# GCS Storage CLI -A Golang CLI for uploading, fetching and deleting content to/from [Google Cloud Storage](https://cloud.google.com/storage/). -This tool exists to work with the [bosh-cli](https://github.com/cloudfoundry/bosh-cli) and [director](https://github.com/cloudfoundry/bosh). +# GCS Client -This is **not** an official Google Product. +GCS (Google Cloud Storage) client implementation for the unified storage-cli tool. This module provides Google Cloud Storage operations through the main storage-cli binary. +**Note:** This is not a standalone CLI. Use the main `storage-cli` binary with `-s gcs` flag to access GCS functionality. -## Commands +For general usage and build instructions, see the [main README](../README.md). -### Usage -```bash -gcs-cli --help -``` -### Upload an object -```bash -gcs-cli -c config.json put -``` -### Fetch an object -```bash -gcs-cli -c config.json get -``` -### Delete an object -```bash -gcs-cli -c config.json delete -``` -### Check if an object exists -```bash -gcs-cli -c config.json exists -``` +This is **not** an official Google Product. 
-### Generate a signed url for an object -If there is an encryption key present in the config, then an additional header is sent +## GCS-Specific Configuration -```bash -gcs-cli -c config.json sign -``` -Where: - - `` is GET, PUT, or DELETE - - `` is a duration string less than 7 days (e.g. "6h") - -## Configuration -The command line tool expects a JSON configuration file. Run `storage-cli-gcs --help` for details. +The GCS client requires a JSON configuration file. ### Authentication Methods (`credentials_source`) * `static`: A [service account](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) key will be provided via the `json_key` field. @@ -48,11 +19,35 @@ The command line tool expects a JSON configuration file. Run `storage-cli-gcs -- will be used if they exist (either through `gcloud auth application-default login` or a [service account](https://cloud.google.com/iam/docs/understanding-service-accounts)). If they don't exist the client will fall back to `none` behavior. -## Running Tests -## Unit Tests -1. Use the command `make -C .github/scripts/gcs test-unit` +**Usage examples:** +```bash +# Upload an object +storage-cli -s gcs -c gcs-config.json put local-file.txt remote-blob + +# Fetch an object +storage-cli -s gcs -c gcs-config.json get remote-blob local-file.txt + +# Delete an object +storage-cli -s gcs -c gcs-config.json delete remote-blob + +# Check if an object exists +storage-cli -s gcs -c gcs-config.json exists remote-blob + +# Generate a signed URL (e.g., GET for 1 hour) +storage-cli -s gcs -c gcs-config.json sign remote-blob get 60s +``` + + +## Testing + +### Unit Tests +Run unit tests from the repository root: + +```bash +ginkgo --skip-package=integration --cover -v -r ./gcs/... +``` -## Integration Tests +### Integration Tests 1. Create a service account with the `Storage Admin` role. 1. Create a new key for your service account and download credential as JSON file. 1. 
Export json content with `export google_json_key_data="$(cat )"`. diff --git a/gcs/client/client.go b/gcs/client/client.go index 10b6aad..dea5424 100644 --- a/gcs/client/client.go +++ b/gcs/client/client.go @@ -22,6 +22,8 @@ import ( "fmt" "io" "log" + "os" + "strings" "time" "golang.org/x/oauth2/google" @@ -85,7 +87,13 @@ func New(ctx context.Context, cfg *config.GCSCli) (*GCSBlobstore, error) { // Get fetches a blob from the GCS blobstore. // Destination will be overwritten if it already exists. -func (client *GCSBlobstore) Get(src string, dest io.Writer) error { +func (client *GCSBlobstore) Get(src string, dest string) error { + dstFile, err := os.Create(dest) + if err != nil { + return err + } + defer dstFile.Close() //nolint:errcheck + reader, err := client.getReader(client.publicGCS, src) // If the public client fails, try using it as an authenticated actor @@ -97,7 +105,7 @@ func (client *GCSBlobstore) Get(src string, dest io.Writer) error { return err } - _, err = io.Copy(dest, reader) + _, err = io.Copy(dstFile, reader) return err } @@ -111,7 +119,14 @@ func (client *GCSBlobstore) getReader(gcs *storage.Client, src string) (*storage // Put retries retryAttempts times const retryAttempts = 3 -func (client *GCSBlobstore) Put(src io.ReadSeeker, dest string) error { +func (client *GCSBlobstore) Put(sourceFilePath string, dest string) error { + + src, err := os.Open(sourceFilePath) + if err != nil { + return err + } + defer src.Close() //nolint:errcheck + if client.readOnly() { return ErrInvalidROWriteOperation } @@ -126,7 +141,7 @@ func (client *GCSBlobstore) Put(src io.ReadSeeker, dest string) error { } var errs []error - for i := 0; i < retryAttempts; i++ { + for i := range retryAttempts { err := client.putOnce(src, dest) if err == nil { return nil @@ -144,11 +159,14 @@ func (client *GCSBlobstore) Put(src io.ReadSeeker, dest string) error { } func (client *GCSBlobstore) putOnce(src io.ReadSeeker, dest string) error { - remoteWriter := 
client.getObjectHandle(client.authenticatedGCS, dest).NewWriter(context.Background()) //nolint:staticcheck - remoteWriter.ObjectAttrs.StorageClass = client.config.StorageClass //nolint:staticcheck + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Clean up the context after the function completes + + remoteWriter := client.getObjectHandle(client.authenticatedGCS, dest).NewWriter(ctx) //nolint:staticcheck + remoteWriter.ObjectAttrs.StorageClass = client.config.StorageClass //nolint:staticcheck if _, err := io.Copy(remoteWriter, src); err != nil { - remoteWriter.CloseWithError(err) //nolint:errcheck,staticcheck + remoteWriter.Close() //nolint:errcheck return err } @@ -201,6 +219,7 @@ func (client *GCSBlobstore) readOnly() bool { } func (client *GCSBlobstore) Sign(id string, action string, expiry time.Duration) (string, error) { + action = strings.ToUpper(action) token, err := google.JWTConfigFromJSON([]byte(client.config.ServiceAccountFile), storage.ScopeFullControl) if err != nil { return "", err @@ -225,3 +244,23 @@ func (client *GCSBlobstore) Sign(id string, action string, expiry time.Duration) } return storage.SignedURL(client.config.BucketName, id, &options) } + +func (client *GCSBlobstore) List(prefix string) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (client *GCSBlobstore) Copy(srcBlob string, dstBlob string) error { + return errors.New("not implemented") +} + +func (client *GCSBlobstore) Properties(dest string) error { + return errors.New("not implemented") +} + +func (client *GCSBlobstore) EnsureStorageExists() error { + return errors.New("not implemented") +} + +func (client *GCSBlobstore) DeleteRecursive(prefix string) error { + return errors.New("not implemented") +} diff --git a/gcs/config/config_suite_test.go b/gcs/config/config_suite_test.go index 2bd95f0..513cc3c 100644 --- a/gcs/config/config_suite_test.go +++ b/gcs/config/config_suite_test.go @@ -25,5 +25,5 @@ import ( func TestConfig(t 
*testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Config Suite") + RunSpecs(t, "Gcs Config Suite") } diff --git a/gcs/integration/assertions.go b/gcs/integration/assertions.go index 2fa76f7..8410be5 100644 --- a/gcs/integration/assertions.go +++ b/gcs/integration/assertions.go @@ -36,39 +36,32 @@ const NoLongMsg = "environment variable %s filled, skipping long test" // This is using gomega matchers, so it will fail if called outside an // 'It' test. func AssertLifecycleWorks(gcsCLIPath string, ctx AssertContext) { - session, err := RunGCSCLI(gcsCLIPath, ctx.ConfigPath, - "put", ctx.ContentFile, ctx.GCSFileName) + storageType := "gcs" + session, err := RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "put", ctx.ContentFile, ctx.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) - session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, - "exists", ctx.GCSFileName) + session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "exists", ctx.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) Expect(session.Err.Contents()).To(MatchRegexp("File '.*' exists in bucket '.*'")) - tmpLocalFile, err := os.CreateTemp("", "gcscli-download") - Expect(err).ToNot(HaveOccurred()) - defer os.Remove(tmpLocalFile.Name()) //nolint:errcheck - err = tmpLocalFile.Close() - Expect(err).ToNot(HaveOccurred()) + tmpLocalFileName := "gcscli-download" + defer os.Remove(tmpLocalFileName) //nolint:errcheck - session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, - "get", ctx.GCSFileName, tmpLocalFile.Name()) + session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "get", ctx.GCSFileName, tmpLocalFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) - gottenBytes, err := os.ReadFile(tmpLocalFile.Name()) + gottenBytes, err := os.ReadFile(tmpLocalFileName) Expect(err).ToNot(HaveOccurred()) Expect(string(gottenBytes)).To(Equal(ctx.ExpectedString)) - session, err = 
RunGCSCLI(gcsCLIPath, ctx.ConfigPath, - "delete", ctx.GCSFileName) + session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "delete", ctx.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) - session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, - "exists", ctx.GCSFileName) + session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "exists", ctx.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(Equal(3)) Expect(session.Err.Contents()).To(MatchRegexp("File '.*' does not exist in bucket '.*'")) diff --git a/gcs/integration/gcs_encryption_test.go b/gcs/integration/gcs_encryption_test.go index c37d71e..1c4319c 100644 --- a/gcs/integration/gcs_encryption_test.go +++ b/gcs/integration/gcs_encryption_test.go @@ -17,8 +17,8 @@ package integration import ( - "bytes" "crypto/sha256" + "os" "github.com/cloudfoundry/storage-cli/gcs/client" "github.com/cloudfoundry/storage-cli/gcs/config" @@ -35,6 +35,7 @@ var encryptionKeyBytes = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 var encryptionKeyBytesHash = sha256.Sum256(encryptionKeyBytes) //nolint:unused var _ = Describe("Integration", func() { + storageType := "gcs" Context("general (Default Applicaton Credentials) configuration", func() { var ( env AssertContext @@ -59,11 +60,9 @@ var _ = Describe("Integration", func() { // tests that uploading a blob with encryption // results in failure to download when the key is changed. 
It("fails to get with the wrong encryption_key", func() { - Expect(env.Config.EncryptionKey).ToNot(BeNil(), - "Need encryption key for test") + Expect(env.Config.EncryptionKey).ToNot(BeNil(), "Need encryption key for test") - session, err := RunGCSCLI(gcsCLIPath, env.ConfigPath, - "put", env.ContentFile, env.GCSFileName) + session, err := RunGCSCLI(gcsCLIPath, env.ConfigPath, storageType, "put", env.ContentFile, env.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) @@ -72,11 +71,12 @@ var _ = Describe("Integration", func() { env.Config.EncryptionKey[0]++ - var target bytes.Buffer - err = blobstoreClient.Get(env.GCSFileName, &target) + tmpFileName := "gcscli-test-wrong-enc-key" + defer os.Remove(tmpFileName) //nolint:errcheck + err = blobstoreClient.Get(env.GCSFileName, tmpFileName) Expect(err).To(HaveOccurred()) - session, err = RunGCSCLI(gcsCLIPath, env.ConfigPath, "delete", env.GCSFileName) + session, err = RunGCSCLI(gcsCLIPath, env.ConfigPath, storageType, "delete", env.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) }) @@ -87,7 +87,7 @@ var _ = Describe("Integration", func() { Expect(env.Config.EncryptionKey).ToNot(BeNil(), "Need encryption key for test") - session, err := RunGCSCLI(gcsCLIPath, env.ConfigPath, "put", env.ContentFile, env.GCSFileName) + session, err := RunGCSCLI(gcsCLIPath, env.ConfigPath, storageType, "put", env.ContentFile, env.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) @@ -96,11 +96,12 @@ var _ = Describe("Integration", func() { env.Config.EncryptionKey = nil - var target bytes.Buffer - err = blobstoreClient.Get(env.GCSFileName, &target) + tmpFileName := "gcscli-test-no-enc-key" + defer os.Remove(tmpFileName) //nolint:errcheck + err = blobstoreClient.Get(env.GCSFileName, tmpFileName) Expect(err).To(HaveOccurred()) - session, err = RunGCSCLI(gcsCLIPath, env.ConfigPath, "delete", env.GCSFileName) + session, err = 
RunGCSCLI(gcsCLIPath, env.ConfigPath, storageType, "delete", env.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) }) diff --git a/gcs/integration/gcs_general_test.go b/gcs/integration/gcs_general_test.go index d0ce8da..3beabaf 100644 --- a/gcs/integration/gcs_general_test.go +++ b/gcs/integration/gcs_general_test.go @@ -17,10 +17,9 @@ package integration import ( - "crypto/rand" "fmt" - "io" "os" + "syscall" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -29,43 +28,9 @@ import ( "github.com/cloudfoundry/storage-cli/gcs/config" ) -// randReadSeeker is a ReadSeeker which returns random content and -// non-nil error for every operation. -// -// crypto/rand is used to ensure any compression -// applied to the reader's output doesn't effect the work we intend to do. -type randReadSeeker struct { - reader io.Reader -} - -func newrandReadSeeker(maxSize int64) randReadSeeker { - limited := io.LimitReader(rand.Reader, maxSize) - return randReadSeeker{limited} -} - -func (rrs *randReadSeeker) Read(p []byte) (n int, err error) { - return rrs.reader.Read(p) -} - -func (rrs *randReadSeeker) Seek(offset int64, whenc int) (n int64, err error) { - return offset, nil -} - -// badReadSeeker is a ReadSeeker which returns a non-nil error -// for every operation. 
-type badReadSeeker struct{} - -var badReadSeekerErr = io.ErrUnexpectedEOF - -func (brs *badReadSeeker) Read(p []byte) (n int, err error) { - return 0, badReadSeekerErr -} - -func (brs *badReadSeeker) Seek(offset int64, whenc int) (n int64, err error) { - return 0, badReadSeekerErr -} - var _ = Describe("Integration", func() { + storageType := "gcs" + Context("general (Default Applicaton Credentials) configuration", func() { var env AssertContext BeforeEach(func() { @@ -88,8 +53,7 @@ var _ = Describe("Integration", func() { func(config *config.GCSCli) { env.AddConfig(config) - session, err := RunGCSCLI(gcsCLIPath, env.ConfigPath, - "delete", env.GCSFileName) + session, err := RunGCSCLI(gcsCLIPath, env.ConfigPath, storageType, "delete", env.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) }, @@ -111,12 +75,14 @@ var _ = Describe("Integration", func() { } const twoGB = 1024 * 1024 * 1024 * 2 - limited := newrandReadSeeker(twoGB) + + largeFile := MakeContentFile(GenerateRandomString(twoGB)) + defer os.Remove(largeFile) //nolint:errcheck blobstoreClient, err := client.New(env.ctx, env.Config) Expect(err).ToNot(HaveOccurred()) - err = blobstoreClient.Put(&limited, env.GCSFileName) + err = blobstoreClient.Put(largeFile, env.GCSFileName) Expect(err).ToNot(HaveOccurred()) blobstoreClient.Delete(env.GCSFileName) //nolint:errcheck @@ -131,8 +97,21 @@ var _ = Describe("Integration", func() { blobstoreClient, err := client.New(env.ctx, env.Config) Expect(err).ToNot(HaveOccurred()) - err = blobstoreClient.Put(&badReadSeeker{}, env.GCSFileName) - Expect(err).To(HaveOccurred()) + // create pipe to be open but not seekable + pipePath := fmt.Sprintf("/tmp/%s", GenerateRandomString(10)) + err = syscall.Mkfifo(pipePath, 0666) + Expect(err).ToNot(HaveOccurred()) + + go func() { + // This will block until the main test opens the pipe for reading. 
+ writer, _ := os.OpenFile(pipePath, os.O_WRONLY, 0) //nolint:errcheck + if writer != nil { + writer.Close() //nolint:errcheck + } + }() + + err = blobstoreClient.Put(pipePath, env.GCSFileName) + Expect(err).To(MatchError(ContainSubstring("illegal seek"))) }, configurations) @@ -140,8 +119,7 @@ var _ = Describe("Integration", func() { func(config *config.GCSCli) { env.AddConfig(config) - session, err := RunGCSCLI(gcsCLIPath, env.ConfigPath, - "get", env.GCSFileName, "/dev/null") + session, err := RunGCSCLI(gcsCLIPath, env.ConfigPath, storageType, "get", env.GCSFileName, "/dev/null") Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).ToNot(BeZero()) Expect(session.Err.Contents()).To(ContainSubstring("object doesn't exist")) diff --git a/gcs/integration/gcs_public_test.go b/gcs/integration/gcs_public_test.go index 3a07fb6..b64ca36 100644 --- a/gcs/integration/gcs_public_test.go +++ b/gcs/integration/gcs_public_test.go @@ -30,6 +30,7 @@ import ( ) var _ = Describe("GCS Public Bucket", func() { + storageType := "gcs" Context("with read-only configuration", func() { var ( setupEnv AssertContext @@ -54,7 +55,7 @@ var _ = Describe("GCS Public Bucket", func() { Describe("with a public file", func() { BeforeEach(func() { // Place a file in the bucket - RunGCSCLI(gcsCLIPath, setupEnv.ConfigPath, "put", setupEnv.ContentFile, setupEnv.GCSFileName) //nolint:errcheck + RunGCSCLI(gcsCLIPath, setupEnv.ConfigPath, storageType, "put", setupEnv.ContentFile, setupEnv.GCSFileName) //nolint:errcheck // Make the file public rwClient, err := newSDK(setupEnv.ctx, *setupEnv.Config) @@ -64,48 +65,46 @@ var _ = Describe("GCS Public Bucket", func() { Expect(obj.ACL().Set(context.Background(), storage.AllUsers, storage.RoleReader)).To(Succeed()) }) AfterEach(func() { - RunGCSCLI(gcsCLIPath, setupEnv.ConfigPath, "delete", setupEnv.GCSFileName) //nolint:errcheck + RunGCSCLI(gcsCLIPath, setupEnv.ConfigPath, storageType, "delete", setupEnv.GCSFileName) //nolint:errcheck 
publicEnv.Cleanup() }) It("can check if it exists", func() { - session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, "exists", setupEnv.GCSFileName) + session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, storageType, "exists", setupEnv.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero()) }) It("can get", func() { - tmpLocalFile, err := os.CreateTemp("", "gcscli-download") - Expect(err).ToNot(HaveOccurred()) - defer os.Remove(tmpLocalFile.Name()) //nolint:errcheck - Expect(tmpLocalFile.Close()).To(Succeed()) + tmpLocalFileName := "gcscli-download" + defer os.Remove(tmpLocalFileName) //nolint:errcheck - session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, "get", setupEnv.GCSFileName, tmpLocalFile.Name()) + session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, storageType, "get", setupEnv.GCSFileName, tmpLocalFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(BeZero(), fmt.Sprintf("unexpected '%s'", session.Err.Contents())) - gottenBytes, err := os.ReadFile(tmpLocalFile.Name()) + gottenBytes, err := os.ReadFile(tmpLocalFileName) Expect(err).ToNot(HaveOccurred()) Expect(string(gottenBytes)).To(Equal(setupEnv.ExpectedString)) }) }) It("fails to get a missing file", func() { - session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, "get", setupEnv.GCSFileName, "/dev/null") + session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, storageType, "get", setupEnv.GCSFileName, "/dev/null") Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).ToNot(BeZero()) Expect(session.Err.Contents()).To(ContainSubstring("object doesn't exist")) }) It("fails to put", func() { - session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, "put", publicEnv.ContentFile, publicEnv.GCSFileName) + session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, storageType, "put", publicEnv.ContentFile, publicEnv.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).ToNot(BeZero()) 
Expect(session.Err.Contents()).To(ContainSubstring(client.ErrInvalidROWriteOperation.Error())) }) It("fails to delete", func() { - session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, "delete", publicEnv.GCSFileName) + session, err := RunGCSCLI(gcsCLIPath, publicEnv.ConfigPath, storageType, "delete", publicEnv.GCSFileName) Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).ToNot(BeZero()) Expect(session.Err.Contents()).To(ContainSubstring(client.ErrInvalidROWriteOperation.Error())) diff --git a/gcs/integration/gcs_static_test.go b/gcs/integration/gcs_static_test.go index fff4f29..c23a008 100644 --- a/gcs/integration/gcs_static_test.go +++ b/gcs/integration/gcs_static_test.go @@ -28,6 +28,7 @@ import ( ) var _ = Describe("Integration", func() { + storageType := "gcs" Context("static credentials configuration with a regional bucket", func() { var ( ctx AssertContext @@ -47,13 +48,13 @@ var _ = Describe("Integration", func() { }) It("validates the action is valid", func() { - session, err := RunGCSCLI(gcsCLIPath, ctx.ConfigPath, "sign", ctx.GCSFileName, "not-valid", "1h") + session, err := RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "sign", ctx.GCSFileName, "not-valid", "1h") Expect(err).NotTo(HaveOccurred()) Expect(session.ExitCode()).ToNot(Equal(0)) }) It("can generate a signed url for a given object and action", func() { - session, err := RunGCSCLI(gcsCLIPath, ctx.ConfigPath, "sign", ctx.GCSFileName, "put", "1h") + session, err := RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "sign", ctx.GCSFileName, "put", "1h") Expect(err).ToNot(HaveOccurred()) Expect(session.ExitCode()).To(Equal(0)) @@ -90,12 +91,12 @@ var _ = Describe("Integration", func() { // echo -n key | base64 -D | shasum -a 256 | cut -f1 -d' ' | tr -d '\n' | xxd -r -p | base64 hash := "bQOB9Mp048LRjpIoKm2njgQgiC3FRO2gn/+x6Vlfa4E=" - session, err := RunGCSCLI(gcsCLIPath, ctx.ConfigPath, "sign", ctx.GCSFileName, "PUT", "1h") + session, err := RunGCSCLI(gcsCLIPath, 
ctx.ConfigPath, storageType, "sign", ctx.GCSFileName, "put", "1h") Expect(err).ToNot(HaveOccurred()) signedPutUrl := string(session.Out.Contents()) Expect(signedPutUrl).ToNot(BeNil()) - session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, "sign", ctx.GCSFileName, "GET", "1h") + session, err = RunGCSCLI(gcsCLIPath, ctx.ConfigPath, storageType, "sign", ctx.GCSFileName, "get", "1h") Expect(err).ToNot(HaveOccurred()) signedGetUrl := string(session.Out.Contents()) Expect(signedGetUrl).ToNot(BeNil()) diff --git a/gcs/integration/integration_suite_test.go b/gcs/integration/integration_suite_test.go index d18d685..ff69276 100644 --- a/gcs/integration/integration_suite_test.go +++ b/gcs/integration/integration_suite_test.go @@ -34,7 +34,7 @@ func TestIntegration(t *testing.T) { var _ = BeforeSuite(func() { // Integration test against the CLI means we need the binary. var err error - gcsCLIPath, err = gexec.Build("github.com/cloudfoundry/storage-cli/gcs") + gcsCLIPath, err = gexec.Build("github.com/cloudfoundry/storage-cli") Expect(err).ShouldNot(HaveOccurred()) }) diff --git a/gcs/integration/utils.go b/gcs/integration/utils.go index 400db50..2ba0bfd 100644 --- a/gcs/integration/utils.go +++ b/gcs/integration/utils.go @@ -71,12 +71,13 @@ func MakeContentFile(content string) string { // RunGCSCLI run the gcscli and outputs the session // after waiting for it to finish -func RunGCSCLI(gcsCLIPath, configPath, subcommand string, - args ...string) (*gexec.Session, error) { +func RunGCSCLI(gcsCLIPath, configPath, storageType, subcommand string, args ...string) (*gexec.Session, error) { cmdArgs := []string{ "-c", configPath, + "-s", + storageType, subcommand, } cmdArgs = append(cmdArgs, args...) diff --git a/gcs/main.go b/gcs/main.go deleted file mode 100644 index 1cf4ad2..0000000 --- a/gcs/main.go +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright 2017 Google Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "flag" - "fmt" - "log" - "net/http" - "os" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/cloudfoundry/storage-cli/gcs/client" - "github.com/cloudfoundry/storage-cli/gcs/config" -) - -var version = "dev" - -// usageExample provides examples of how to use the CLI. -const usageExample = ` -# Usage -storage-cli-gcs --help - -# Upload a blob to the GCS blobstore. -storage-cli-gcs -c config.json put - -# Fetch a blob from the GCS blobstore. -# Destination file will be overwritten if exists. -storage-cli-gcs -c config.json get - -# Remove a blob from the GCS blobstore. -storage-cli-gcs -c config.json delete - -# Checks if blob exists in the GCS blobstore. -storage-cli-gcs -c config.json exists - -# Generate a signed url for an object -# if an encryption key is present in config, the appropriate header will be sent -# users of the signed url must include encryption headers in request -# Where: -# - is GET, PUT, or DELETE -# - is a duration string less than 7 days (e.g. 
"6h") -# eg storage-cli-gcs -c config.json sign blobid PUT 24h -storage-cli-gcs -c config.json sign ` - -var ( - showVer = flag.Bool("v", false, "Print CLI version") - shortHelp = flag.Bool("h", false, "Print this help text") - longHelp = flag.Bool("help", false, "Print this help text") - configPath = flag.String("c", "", - `path to a JSON file with the following contents: - { - "bucket_name": "name of Google Cloud Storage bucket (required)", - "credentials_source": "Optional, defaults to Application Default Credentials or none) - (can be 'static' for a service account specified in json_key), - (can be 'none' for explicitly no credentials)" - "json_key": "JSON Service Account File - (optional, required for 'static' credentials)", - "storage_class": "storage class for objects - (optional, defaults to bucket settings)", - "encryption_key": "Base64 encoded 32 byte Customer-Supplied - encryption key used to encrypt objects - (optional, defaults to GCS controlled key)" - } - - storage_class is one of MULTI_REGIONAL, REGIONAL, NEARLINE, or COLDLINE. 
- For more information on characteristics and location compatibility: - https://cloud.google.com/storage/docs/storage-classes - - For more information on Customer-Supplied encryption keys: - https://cloud.google.com/storage/docs/encryption -`) -) - -func main() { - flag.Parse() - - if *showVer { - fmt.Printf("version %s\n", version) - os.Exit(0) - } - - if *shortHelp || *longHelp || len(flag.Args()) == 0 { - flag.Usage() - fmt.Println(usageExample) - os.Exit(0) - } - - if *configPath == "" { - log.Fatalf("no config file provided\nSee -help for usage\n") - } - - configFile, err := os.Open(*configPath) - if err != nil { - log.Fatalf("opening config %s: %v\n", *configPath, err) - } - - gcsConfig, err := config.NewFromReader(configFile) - if err != nil { - log.Fatalf("reading config %s: %v\n", *configPath, err) - } - - ctx := context.Background() - blobstoreClient, err := client.New(ctx, &gcsConfig) - if err != nil { - log.Fatalf("creating gcs client: %v\n", err) - } - - nonFlagArgs := flag.Args() - if len(nonFlagArgs) < 2 { - log.Fatalf("Expected at least two arguments got %d\n", len(nonFlagArgs)) - } - - cmd := nonFlagArgs[0] - - switch cmd { - case "put": - if len(nonFlagArgs) != 3 { - log.Fatalf("put method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - src, dst := nonFlagArgs[1], nonFlagArgs[2] - - var sourceFile *os.File - sourceFile, err = os.Open(src) - if err != nil { - log.Fatalln(err) - } - - defer sourceFile.Close() //nolint:errcheck - err = blobstoreClient.Put(sourceFile, dst) - fmt.Println(err) - case "get": - if len(nonFlagArgs) != 3 { - log.Fatalf("get method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - src, dst := nonFlagArgs[1], nonFlagArgs[2] - - var dstFile *os.File - dstFile, err = os.Create(dst) - if err != nil { - log.Fatalln(err) - } - - defer dstFile.Close() //nolint:errcheck - err = blobstoreClient.Get(src, dstFile) - case "delete": - if len(nonFlagArgs) != 2 { - log.Fatalf("delete method expected 2 arguments got %d\n", 
len(nonFlagArgs)) - } - - err = blobstoreClient.Delete(nonFlagArgs[1]) - case "exists": - if len(nonFlagArgs) != 2 { - log.Fatalf("exists method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - - var exists bool - exists, err = blobstoreClient.Exists(nonFlagArgs[1]) - - // If the object exists the exit status is 0, otherwise it is 3 - // We are using `3` since `1` and `2` have special meanings - if err == nil && !exists { - os.Exit(3) - } - case "sign": - if len(nonFlagArgs) != 4 { - log.Fatalf("sign method expected 3 arguments got %d\n", len(nonFlagArgs)) - } - - id, action, expiry := nonFlagArgs[1], nonFlagArgs[2], nonFlagArgs[3] - - action = strings.ToUpper(action) - err = validateAction(action) - if err != nil { - log.Fatal(err) - } - - var expiryDuration time.Duration - expiryDuration, err = time.ParseDuration(expiry) - if err != nil { - log.Fatalf("Invalid expiry duration: %v", err) - } - url := "" - url, err = blobstoreClient.Sign(id, action, expiryDuration) - if err == nil { - os.Stdout.WriteString(url) //nolint:errcheck - } - - default: - log.Fatalf("unknown command: '%s'\n", cmd) - } - - if err != nil { - log.Fatalf("performing operation %s: %s\n", cmd, err) - } -} - -func validateAction(action string) error { - if action != http.MethodGet && action != http.MethodPut && action != http.MethodDelete { - return fmt.Errorf("invalid signing action: %s must be GET, PUT, or DELETE", action) - } - return nil -} diff --git a/go.mod b/go.mod index ff1d936..5a6cec9 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,6 @@ require ( github.com/aws/smithy-go v1.23.1 github.com/cloudfoundry/bosh-utils v0.0.560 github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0 - github.com/onsi/ginkgo v1.16.5 github.com/onsi/ginkgo/v2 v2.27.2 github.com/onsi/gomega v1.38.2 golang.org/x/net v0.46.0 @@ -59,7 +58,6 @@ require ( github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/felixge/httpsnoop v1.0.4 // 
indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-jose/go-jose/v4 v4.1.2 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -99,5 +97,4 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect google.golang.org/grpc v1.76.0 // indirect google.golang.org/protobuf v1.36.10 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect ) diff --git a/go.sum b/go.sum index c347d72..1851e0b 100644 --- a/go.sum +++ b/go.sum @@ -102,8 +102,6 @@ github.com/cloudfoundry/socks5-proxy v0.2.158 h1:R+7NlxmzCiTMAyZqNt77G/DgdEINcEj github.com/cloudfoundry/socks5-proxy v0.2.158/go.mod h1:RFuO7DkORi74ijYHjGNWiW2OSNxmklFWcxp22KdbO7Y= github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= @@ -116,8 +114,6 @@ github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfU github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod 
h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= @@ -133,25 +129,14 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= @@ -166,7 +151,6 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -185,18 +169,12 @@ github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3v github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail 
v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/pivotal-cf/paraphernalia v0.0.0-20180203224945-a64ae2051c20 h1:DR5eMfe2+6GzLkVyWytdtgUxgbPiOfvKDuqityTV3y8= @@ -207,7 +185,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 
h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -218,8 +195,6 @@ github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8W github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/square/certstrap v1.3.0 h1:N9P0ZRA+DjT8pq5fGDj0z3FjafRKnBDypP0QHpMlaAk= github.com/square/certstrap v1.3.0/go.mod h1:wGZo9eE1B7WX2GKBn0htJ+B3OuRl2UsdCFySNooy9hU= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= @@ -230,7 +205,6 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= @@ -257,57 +231,27 @@ go.step.sm/crypto v0.70.0 h1:Q9Ft7N637mucyZcHZd1+0VVQJVwDCKqcb9CYcYi7cds= go.step.sm/crypto v0.70.0/go.mod h1:pzfUhS5/ue7ev64PLlEgXvhx1opwbhFCjkvlhsxVds0= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys 
v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.254.0 h1:jl3XrGj7lRjnlUvZAbAdhINTLbsg5dbjmR90+pTQvt4= @@ -320,22 +264,12 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/main.go b/main.go new file mode 100644 index 0000000..0b76400 --- /dev/null +++ b/main.go @@ -0,0 +1,62 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + + storage "github.com/cloudfoundry/storage-cli/storage" +) + +var version string + +func fatalLog(cmd string, err error) { + if err == nil { + return + } + // If the object exists the exit status is 0, otherwise it is 3 + // We are using `3` since `1` and `2` have special meanings + if _, ok := err.(*storage.NotExistsError); ok { + log.Printf("performing operation %s: %s\n", cmd, err) + os.Exit(3) + } + log.Fatalf("performing operation %s: %s\n", cmd, err) + +} + +func main() { + + configPath := 
flag.String("c", "", "configuration path") + showVer := flag.Bool("v", false, "version") + storageType := flag.String("s", "s3", "storage type: azurebs|alioss|s3|gcs|dav") + flag.Parse() + + if *showVer { + fmt.Printf("version %s\n", version) + os.Exit(0) + } + + configFile, err := os.Open(*configPath) + if err != nil { + log.Fatalln(err) + } + defer configFile.Close() //nolint:errcheck + + client, err := storage.NewStorageClient(*storageType, configFile) + if err != nil { + log.Fatalln(err) + } + + cex := storage.NewCommandExecuter(client) + + nonFlagArgs := flag.Args() + if len(nonFlagArgs) < 1 { + log.Fatalf("Expected at least 1 argument (command) got 0") + } + + cmd := nonFlagArgs[0] + err = cex.Execute(cmd, nonFlagArgs[1:]) + fatalLog(cmd, err) + +} diff --git a/s3/README.md b/s3/README.md index bc567be..dc4936b 100644 --- a/s3/README.md +++ b/s3/README.md @@ -1,15 +1,14 @@ -## S3 CLI +# S3 Client -A CLI for uploading, fetching and deleting content to/from an S3-compatible -blobstore. +S3 client implementation for the unified storage-cli tool. This module provides S3-compatible blobstore operations through the main storage-cli binary. -Continuous integration: +**Note:** This is not a standalone CLI. Use the main `storage-cli` binary with the `-s s3` flag to access S3 functionality. -Releases can be found in `https://s3.amazonaws.com/bosh-s3cli-artifacts`. The Linux binaries follow the regex `s3cli-(\d+\.\d+\.\d+)-linux-amd64` and the windows binaries `s3cli-(\d+\.\d+\.\d+)-windows-amd64`. +For general usage and build instructions, see the [main README](../README.md). -## Usage +## S3-Specific Configuration -Given a JSON config file (`config.json`)... +The S3 client requires a JSON configuration file with the following structure: ``` json {
} ``` -``` bash -# Usage -s3-cli --help +**Usage examples:** +```shell +# Upload a file to S3 +storage-cli -s s3 -c s3-config.json put local-file.txt remote-object.txt -# Command: "put" -# Upload a blob to an S3-compatible blobstore. -s3-cli -c config.json put +# Download a file from S3 +storage-cli -s s3 -c s3-config.json get remote-object.txt downloaded-file.txt -# Command: "get" -# Fetch a blob from an S3-compatible blobstore. -# Destination file will be overwritten if exists. -s3-cli -c config.json get +# Check if an object exists +storage-cli -s s3 -c s3-config.json exists remote-object.txt -# Command: "delete" -# Remove a blob from an S3-compatible blobstore. -s3-cli -c config.json delete +# List all objects +storage-cli -s s3 -c s3-config.json list -# Command: "exists" -# Checks if blob exists in an S3-compatible blobstore. -s3-cli -c config.json exists - -# Command: "sign" -# Create a self-signed url for an object -s3-cli -c config.json sign +# Delete an object +storage-cli -s s3 -c s3-config.json delete remote-object.txt ``` -## Contributing - -Follow these steps to make a contribution to the project: - -- Fork this repository -- Create a feature branch based upon the `main` branch (*pull requests must be made against this branch*) - ``` bash - git checkout -b feature-name origin/main - ``` -- Run tests to check your development environment setup - ``` bash - ginkgo --race --skip-package=integration --randomize-all --cover -v -r ./s3/... - ``` -- Make your changes (*be sure to add/update tests*) -- Run tests to check your changes - ``` bash - ginkgo --race --skip-package=integration --randomize-all --cover -v -r ./s3/... - ``` -- Push changes to your fork - ``` bash - git add . - git commit -m "Commit message" - git push origin feature-name - ``` -- Create a GitHub pull request, selecting `main` as the target branch - ## Testing ### Unit Tests -**Note:** Run the following commands from the repository root directory. 
- ``` bash - go install github.com/onsi/ginkgo/v2/ginkgo - - ginkgo --skip-package=integration --randomize-all --cover -v -r ./s3/... - ``` +Run unit tests from the repository root: +```bash +ginkgo --skip-package=integration --cover -v -r ./s3/... +``` ### Integration Tests diff --git a/s3/client/client.go b/s3/client/client.go index 1e92c83..647362e 100644 --- a/s3/client/client.go +++ b/s3/client/client.go @@ -1,7 +1,9 @@ package client import ( - "io" + "errors" + "log" + "os" "time" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -9,17 +11,15 @@ import ( "github.com/cloudfoundry/storage-cli/s3/config" ) -type S3CompatibleClient interface { - Get(src string, dest io.WriterAt) error - Put(src io.ReadSeeker, dest string) error - Delete(dest string) error - Exists(dest string) (bool, error) - Sign(objectID string, action string, expiration time.Duration) (string, error) +type S3CompatibleClient struct { + s3cliConfig *config.S3Cli + awsS3BlobstoreClient *awsS3Client + openstackSwiftBlobstore *openstackSwiftS3Client } // New returns an S3CompatibleClient -func New(s3Client *s3.Client, s3cliConfig *config.S3Cli) S3CompatibleClient { - return &s3CompatibleClient{ +func New(s3Client *s3.Client, s3cliConfig *config.S3Cli) *S3CompatibleClient { + return &S3CompatibleClient{ s3cliConfig: s3cliConfig, openstackSwiftBlobstore: &openstackSwiftS3Client{ s3cliConfig: s3cliConfig, @@ -31,32 +31,60 @@ func New(s3Client *s3.Client, s3cliConfig *config.S3Cli) S3CompatibleClient { } } -type s3CompatibleClient struct { - s3cliConfig *config.S3Cli - awsS3BlobstoreClient *awsS3Client - openstackSwiftBlobstore *openstackSwiftS3Client -} - -func (c *s3CompatibleClient) Get(src string, dest io.WriterAt) error { - return c.awsS3BlobstoreClient.Get(src, dest) +func (c *S3CompatibleClient) Get(src string, dest string) error { + dstFile, err := os.Create(dest) + if err != nil { + return err + } + defer dstFile.Close() //nolint:errcheck + return c.awsS3BlobstoreClient.Get(src, dstFile) } -func 
(c *s3CompatibleClient) Put(src io.ReadSeeker, dest string) error { - return c.awsS3BlobstoreClient.Put(src, dest) +func (c *S3CompatibleClient) Put(src string, dest string) error { + sourceFile, err := os.Open(src) + if err != nil { + log.Fatalln(err) + } + defer sourceFile.Close() //nolint:errcheck + return c.awsS3BlobstoreClient.Put(sourceFile, dest) } -func (c *s3CompatibleClient) Delete(dest string) error { +func (c *S3CompatibleClient) Delete(dest string) error { return c.awsS3BlobstoreClient.Delete(dest) } -func (c *s3CompatibleClient) Exists(dest string) (bool, error) { +func (c *S3CompatibleClient) Exists(dest string) (bool, error) { return c.awsS3BlobstoreClient.Exists(dest) } -func (c *s3CompatibleClient) Sign(objectID string, action string, expiration time.Duration) (string, error) { +func (c *S3CompatibleClient) Sign(objectID string, action string, expiration time.Duration) (string, error) { if c.s3cliConfig.SwiftAuthAccount != "" { return c.openstackSwiftBlobstore.Sign(objectID, action, expiration) } return c.awsS3BlobstoreClient.Sign(objectID, action, expiration) } + +func (c *S3CompatibleClient) EnsureStorageExists() error { + return errors.New("not implemented") + +} + +func (c *S3CompatibleClient) Copy(srcBlob string, dstBlob string) error { + return errors.New("not implemented") + +} + +func (c *S3CompatibleClient) Properties(dest string) error { + return errors.New("not implemented") + +} + +func (c *S3CompatibleClient) List(prefix string) ([]string, error) { + return nil, errors.New("not implemented") + +} + +func (c *S3CompatibleClient) DeleteRecursive(prefix string) error { + return errors.New("not implemented") +} diff --git a/s3/client/client_suite_test.go b/s3/client/client_suite_test.go index 79e004c..ff455a4 100644 --- a/s3/client/client_suite_test.go +++ b/s3/client/client_suite_test.go @@ -9,5 +9,5 @@ import ( func TestClient(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Client Suite") + RunSpecs(t, "S3 Client Suite") } diff 
--git a/s3/client/client_test.go b/s3/client/client_test.go index 37e463e..117bd0a 100644 --- a/s3/client/client_test.go +++ b/s3/client/client_test.go @@ -10,12 +10,13 @@ import ( "github.com/cloudfoundry/storage-cli/s3/client" "github.com/cloudfoundry/storage-cli/s3/config" + s "github.com/cloudfoundry/storage-cli/storage" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) var _ = Describe("S3CompatibleClient", func() { - var blobstoreClient client.S3CompatibleClient + var blobstoreClient s.Storager var s3Config *config.S3Cli Describe("Sign()", func() { @@ -57,7 +58,7 @@ var _ = Describe("S3CompatibleClient", func() { Context("when the action is GET", func() { BeforeEach(func() { - action = "GET" + action = "get" }) It("returns a signed URL", func() { @@ -70,7 +71,7 @@ var _ = Describe("S3CompatibleClient", func() { Context("when the action is PUT", func() { BeforeEach(func() { - action = "PUT" + action = "put" }) It("returns a signed URL", func() { @@ -117,7 +118,7 @@ var _ = Describe("S3CompatibleClient", func() { Context("when the action is GET", func() { BeforeEach(func() { - action = "GET" + action = "get" }) It("returns a signed URL", func() { @@ -130,7 +131,7 @@ var _ = Describe("S3CompatibleClient", func() { Context("when the action is PUT", func() { BeforeEach(func() { - action = "PUT" + action = "put" }) It("returns a signed URL", func() { diff --git a/s3/config/config_suite_test.go b/s3/config/config_suite_test.go index 63543a8..63162c5 100644 --- a/s3/config/config_suite_test.go +++ b/s3/config/config_suite_test.go @@ -9,5 +9,5 @@ import ( func TestClient(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Config Suite") + RunSpecs(t, "S3 Config Suite") } diff --git a/s3/integration/assertions.go b/s3/integration/assertions.go index c2951f8..7293fde 100644 --- a/s3/integration/assertions.go +++ b/s3/integration/assertions.go @@ -5,7 +5,6 @@ import ( "fmt" "log" "os" - "strings" "time" "github.com/cloudfoundry/storage-cli/s3/client" @@ 
-19,6 +18,7 @@ import ( // AssertLifecycleWorks tests the main blobstore object lifecycle from creation to deletion func AssertLifecycleWorks(s3CLIPath string, cfg *config.S3Cli) { + storageType := "s3" expectedString := GenerateRandomString() s3Filename := GenerateRandomString() @@ -28,7 +28,7 @@ func AssertLifecycleWorks(s3CLIPath string, cfg *config.S3Cli) { contentFile := MakeContentFile(expectedString) defer os.Remove(contentFile) //nolint:errcheck - s3CLISession, err := RunS3CLI(s3CLIPath, configPath, "put", contentFile, s3Filename) + s3CLISession, err := RunS3CLI(s3CLIPath, configPath, storageType, "put", contentFile, s3Filename) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(BeZero()) @@ -39,12 +39,12 @@ func AssertLifecycleWorks(s3CLIPath string, cfg *config.S3Cli) { defer os.Remove(noFolderConfigPath) //nolint:errcheck s3CLISession, err := - RunS3CLI(s3CLIPath, noFolderConfigPath, "exists", fmt.Sprintf("%s/%s", folderName, s3Filename)) + RunS3CLI(s3CLIPath, noFolderConfigPath, storageType, "exists", fmt.Sprintf("%s/%s", folderName, s3Filename)) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(BeZero()) } - s3CLISession, err = RunS3CLI(s3CLIPath, configPath, "exists", s3Filename) + s3CLISession, err = RunS3CLI(s3CLIPath, configPath, storageType, "exists", s3Filename) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(BeZero()) Expect(s3CLISession.Err.Contents()).To(MatchRegexp("File '.*' exists in bucket '.*'")) @@ -55,7 +55,7 @@ func AssertLifecycleWorks(s3CLIPath string, cfg *config.S3Cli) { Expect(err).ToNot(HaveOccurred()) defer os.Remove(tmpLocalFile.Name()) //nolint:errcheck - s3CLISession, err = RunS3CLI(s3CLIPath, configPath, "get", s3Filename, tmpLocalFile.Name()) + s3CLISession, err = RunS3CLI(s3CLIPath, configPath, storageType, "get", s3Filename, tmpLocalFile.Name()) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(BeZero()) @@ -63,11 +63,11 @@ func 
AssertLifecycleWorks(s3CLIPath string, cfg *config.S3Cli) { Expect(err).ToNot(HaveOccurred()) Expect(string(gottenBytes)).To(Equal(expectedString)) - s3CLISession, err = RunS3CLI(s3CLIPath, configPath, "delete", s3Filename) + s3CLISession, err = RunS3CLI(s3CLIPath, configPath, storageType, "delete", s3Filename) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(BeZero()) - s3CLISession, err = RunS3CLI(s3CLIPath, configPath, "exists", s3Filename) + s3CLISession, err = RunS3CLI(s3CLIPath, configPath, storageType, "exists", s3Filename) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(Equal(3)) Expect(s3CLISession.Err.Contents()).To(MatchRegexp("File '.*' does not exist in bucket '.*'")) @@ -75,7 +75,7 @@ func AssertLifecycleWorks(s3CLIPath string, cfg *config.S3Cli) { func AssertOnPutFailures(s3CLIPath string, cfg *config.S3Cli, content, errorMessage string) { s3Filename := GenerateRandomString() - sourceContent := strings.NewReader(content) + sourceFile := MakeContentFile(content) configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck @@ -92,13 +92,14 @@ func AssertOnPutFailures(s3CLIPath string, cfg *config.S3Cli, content, errorMess } blobstoreClient := client.New(s3Client, &s3Config) - err = blobstoreClient.Put(sourceContent, s3Filename) + err = blobstoreClient.Put(sourceFile, s3Filename) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring(errorMessage)) } // AssertPutOptionsApplied asserts that `s3cli put` uploads files with the requested encryption options func AssertPutOptionsApplied(s3CLIPath string, cfg *config.S3Cli) { + storageType := "s3" expectedString := GenerateRandomString() s3Filename := GenerateRandomString() @@ -111,7 +112,7 @@ func AssertPutOptionsApplied(s3CLIPath string, cfg *config.S3Cli) { configFile, err := os.Open(configPath) Expect(err).ToNot(HaveOccurred()) - s3CLISession, err := RunS3CLI(s3CLIPath, configPath, "put", contentFile, s3Filename) 
//nolint:ineffassign,staticcheck + s3CLISession, err := RunS3CLI(s3CLIPath, configPath, storageType, "put", contentFile, s3Filename) //nolint:ineffassign,staticcheck Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(BeZero()) @@ -139,10 +140,11 @@ func AssertPutOptionsApplied(s3CLIPath string, cfg *config.S3Cli) { // AssertGetNonexistentFails asserts that `s3cli get` on a non-existent object will fail func AssertGetNonexistentFails(s3CLIPath string, cfg *config.S3Cli) { + storageType := "s3" configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck - s3CLISession, err := RunS3CLI(s3CLIPath, configPath, "get", "non-existent-file", "/dev/null") + s3CLISession, err := RunS3CLI(s3CLIPath, configPath, storageType, "get", "non-existent-file", "/dev/null") Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).ToNot(BeZero()) Expect(s3CLISession.Err.Contents()).To(ContainSubstring("NoSuchKey")) @@ -151,17 +153,18 @@ func AssertGetNonexistentFails(s3CLIPath string, cfg *config.S3Cli) { // AssertDeleteNonexistentWorks asserts that `s3cli delete` on a non-existent // object exits with status 0 (tests idempotency) func AssertDeleteNonexistentWorks(s3CLIPath string, cfg *config.S3Cli) { + storageType := "s3" configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck - s3CLISession, err := RunS3CLI(s3CLIPath, configPath, "delete", "non-existent-file") + s3CLISession, err := RunS3CLI(s3CLIPath, configPath, storageType, "delete", "non-existent-file") Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(BeZero()) } func AssertOnMultipartUploads(s3CLIPath string, cfg *config.S3Cli, content string) { s3Filename := GenerateRandomString() - sourceContent := strings.NewReader(content) + sourceFile := MakeContentFile(content) configPath := MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck @@ -181,7 +184,7 @@ func AssertOnMultipartUploads(s3CLIPath string, cfg *config.S3Cli, content 
strin blobstoreClient := client.New(s3Client, &s3Config) - err = blobstoreClient.Put(sourceContent, s3Filename) + err = blobstoreClient.Put(sourceFile, s3Filename) Expect(err).ToNot(HaveOccurred()) switch cfg.Host { diff --git a/s3/integration/aws_assume_role_test.go b/s3/integration/aws_assume_role_test.go index 0b43f2b..af74fdd 100644 --- a/s3/integration/aws_assume_role_test.go +++ b/s3/integration/aws_assume_role_test.go @@ -13,6 +13,7 @@ import ( var _ = Describe("Testing AWS assume role ", func() { Context("with AWS ASSUME ROLE configurations", func() { It("get file from assumed role", func() { + storageType := "s3" accessKeyID := os.Getenv("ACCESS_KEY_ID") Expect(accessKeyID).ToNot(BeEmpty(), "ACCESS_KEY_ID must be set") @@ -46,14 +47,16 @@ var _ = Describe("Testing AWS assume role ", func() { notAssumeRoleConfigPath := integration.MakeConfigFile(nonAssumedRoleCfg) defer os.Remove(notAssumeRoleConfigPath) //nolint:errcheck - s3CLISession, err := integration.RunS3CLI(s3CLIPath, notAssumeRoleConfigPath, "exists", s3Filename) + s3CLISession, err := integration.RunS3CLI(s3CLIPath, notAssumeRoleConfigPath, storageType, "exists", s3Filename) + GinkgoWriter.Println("error is %v", err) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).ToNot(BeZero()) assumeRoleConfigPath := integration.MakeConfigFile(assumedRoleCfg) defer os.Remove(assumeRoleConfigPath) //nolint:errcheck - s3CLISession, err = integration.RunS3CLI(s3CLIPath, assumeRoleConfigPath, "exists", s3Filename) + s3CLISession, err = integration.RunS3CLI(s3CLIPath, assumeRoleConfigPath, storageType, "exists", s3Filename) + GinkgoWriter.Println("error is %v", err) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(BeZero()) }) diff --git a/s3/integration/aws_isolated_region_test.go b/s3/integration/aws_isolated_region_test.go index 276fe23..c186a25 100644 --- a/s3/integration/aws_isolated_region_test.go +++ b/s3/integration/aws_isolated_region_test.go @@ -13,6 +13,7 @@ import ( 
var _ = Describe("Testing in any AWS region isolated from the US standard regions (i.e., cn-north-1)", func() { Context("with AWS ISOLATED REGION (static creds) configurations", func() { It("fails with a config that specifies a valid region but invalid host", func() { + storageType := "s3" accessKeyID := os.Getenv("ACCESS_KEY_ID") Expect(accessKeyID).ToNot(BeEmpty(), "ACCESS_KEY_ID must be set") @@ -42,12 +43,12 @@ var _ = Describe("Testing in any AWS region isolated from the US standard region contentFile := integration.MakeContentFile("test") defer os.Remove(contentFile) //nolint:errcheck - s3CLISession, err := integration.RunS3CLI(s3CLIPath, configPath, "put", contentFile, s3Filename) + s3CLISession, err := integration.RunS3CLI(s3CLIPath, configPath, storageType, "put", contentFile, s3Filename) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).ToNot(BeZero()) Expect(s3CLISession.Err.Contents()).To(ContainSubstring("AuthorizationHeaderMalformed")) - s3CLISession, err = integration.RunS3CLI(s3CLIPath, configPath, "delete", s3Filename) + s3CLISession, err = integration.RunS3CLI(s3CLIPath, configPath, storageType, "delete", s3Filename) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).ToNot(BeZero()) Expect(s3CLISession.Err.Contents()).To(ContainSubstring("AuthorizationHeaderMalformed")) diff --git a/s3/integration/aws_public_read_only_test.go b/s3/integration/aws_public_read_only_test.go index 0461a69..a191d3c 100644 --- a/s3/integration/aws_public_read_only_test.go +++ b/s3/integration/aws_public_read_only_test.go @@ -19,6 +19,7 @@ import ( var _ = Describe("Testing gets against a public AWS S3 bucket", func() { Context("with PUBLIC READ ONLY (no creds) configuration", func() { It("can successfully get a publicly readable file", func() { + storageType := "s3" accessKeyID := os.Getenv("ACCESS_KEY_ID") Expect(accessKeyID).ToNot(BeEmpty(), "ACCESS_KEY_ID must be set") @@ -57,7 +58,7 @@ var _ = Describe("Testing gets against a public 
AWS S3 bucket", func() { configPath := integration.MakeConfigFile(cfg) defer os.Remove(configPath) //nolint:errcheck - s3CLISession, err := integration.RunS3CLI(s3CLIPath, configPath, "get", s3Filename, "public-file") + s3CLISession, err := integration.RunS3CLI(s3CLIPath, configPath, storageType, "get", s3Filename, "public-file") Expect(err).ToNot(HaveOccurred()) defer os.Remove("public-file") //nolint:errcheck @@ -67,7 +68,7 @@ var _ = Describe("Testing gets against a public AWS S3 bucket", func() { Expect(err).ToNot(HaveOccurred()) Expect(string(gottenBytes)).To(Equal(s3FileContents)) - s3CLISession, err = integration.RunS3CLI(s3CLIPath, configPath, "exists", s3Filename) + s3CLISession, err = integration.RunS3CLI(s3CLIPath, configPath, storageType, "exists", s3Filename) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).To(BeZero()) Expect(s3CLISession.Err.Contents()).To(MatchRegexp("File '.*' exists in bucket '.*'")) diff --git a/s3/integration/aws_v4_only_region_test.go b/s3/integration/aws_v4_only_region_test.go index 8508598..539dab7 100644 --- a/s3/integration/aws_v4_only_region_test.go +++ b/s3/integration/aws_v4_only_region_test.go @@ -13,6 +13,7 @@ import ( var _ = Describe("Testing in any AWS region that only supports v4 signature version", func() { Context("with AWS V4 ONLY REGION (static creds) configurations", func() { It("fails with a config that specifies signature version 2", func() { + storageType := "s3" accessKeyID := os.Getenv("ACCESS_KEY_ID") Expect(accessKeyID).ToNot(BeEmpty(), "ACCESS_KEY_ID must be set") @@ -40,11 +41,11 @@ var _ = Describe("Testing in any AWS region that only supports v4 signature vers contentFile := integration.MakeContentFile("test") defer os.Remove(contentFile) //nolint:errcheck - s3CLISession, err := integration.RunS3CLI(s3CLIPath, configPath, "put", contentFile, s3Filename) + s3CLISession, err := integration.RunS3CLI(s3CLIPath, configPath, storageType, "put", contentFile, s3Filename) 
Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).ToNot(BeZero()) - s3CLISession, err = integration.RunS3CLI(s3CLIPath, configPath, "delete", s3Filename) + s3CLISession, err = integration.RunS3CLI(s3CLIPath, configPath, storageType, "delete", s3Filename) Expect(err).ToNot(HaveOccurred()) Expect(s3CLISession.ExitCode()).ToNot(BeZero()) }) diff --git a/s3/integration/integration_suite_test.go b/s3/integration/integration_suite_test.go index 2abd83d..0f5aa75 100644 --- a/s3/integration/integration_suite_test.go +++ b/s3/integration/integration_suite_test.go @@ -27,7 +27,7 @@ var _ = BeforeSuite(func() { if len(s3CLIPath) == 0 { var err error - s3CLIPath, err = gexec.Build("github.com/cloudfoundry/storage-cli/s3") + s3CLIPath, err = gexec.Build("github.com/cloudfoundry/storage-cli") Expect(err).ShouldNot(HaveOccurred()) } }) diff --git a/s3/integration/swift_signed_url_test.go b/s3/integration/swift_signed_url_test.go index c0c2805..6bd502e 100644 --- a/s3/integration/swift_signed_url_test.go +++ b/s3/integration/swift_signed_url_test.go @@ -16,6 +16,7 @@ var _ = Describe("Testing for working signed URLs all Swift/OpenStack regions", var configPath string var contentFile string var defaultConfig config.S3Cli + storageType := "s3" accessKeyID := os.Getenv("ACCESS_KEY_ID") secretAccessKey := os.Getenv("SECRET_ACCESS_KEY") @@ -54,13 +55,13 @@ var _ = Describe("Testing for working signed URLs all Swift/OpenStack regions", Describe("Invoking `sign`", func() { It("returns 0 for an existing blob", func() { - cliSession, err := integration.RunS3CLI(s3CLIPath, configPath, "sign", "some-blob", "get", "60s") + cliSession, err := integration.RunS3CLI(s3CLIPath, configPath, storageType, "sign", "some-blob", "get", "60s") Expect(err).ToNot(HaveOccurred()) Expect(cliSession.ExitCode()).To(BeZero()) getUrl := bytes.NewBuffer(cliSession.Out.Contents()).String() Expect(getUrl).To(MatchRegexp("https://" + swiftHost + ".*?" 
+ "/some-blob")) - cliSession, err = integration.RunS3CLI(s3CLIPath, configPath, "sign", "some-blob", "put", "60s") + cliSession, err = integration.RunS3CLI(s3CLIPath, configPath, storageType, "sign", "some-blob", "put", "60s") Expect(err).ToNot(HaveOccurred()) putUrl := bytes.NewBuffer(cliSession.Out.Contents()).String() diff --git a/s3/integration/utils.go b/s3/integration/utils.go index ba2faf2..3aa4bbd 100644 --- a/s3/integration/utils.go +++ b/s3/integration/utils.go @@ -55,14 +55,16 @@ func MakeContentFile(content string) string { } // RunS3CLI runs the s3cli and outputs the session after waiting for it to finish -func RunS3CLI(s3CLIPath string, configPath string, subcommand string, args ...string) (*gexec.Session, error) { +func RunS3CLI(cliPath string, configPath string, storageType string, subcommand string, args ...string) (*gexec.Session, error) { cmdArgs := []string{ "-c", configPath, + "-s", + storageType, subcommand, } cmdArgs = append(cmdArgs, args...) - command := exec.Command(s3CLIPath, cmdArgs...) + command := exec.Command(cliPath, cmdArgs...) 
session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) if err != nil { return nil, err diff --git a/s3/main.go b/s3/main.go deleted file mode 100644 index 1c0c62b..0000000 --- a/s3/main.go +++ /dev/null @@ -1,130 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "time" - - "github.com/cloudfoundry/storage-cli/s3/client" - "github.com/cloudfoundry/storage-cli/s3/config" -) - -var version string - -func main() { - configPath := flag.String("c", "", "configuration path") - showVer := flag.Bool("v", false, "version") - flag.Parse() - - if *showVer { - fmt.Printf("version %s\n", version) - os.Exit(0) - } - - configFile, err := os.Open(*configPath) - if err != nil { - log.Fatalln(err) - } - - s3Config, err := config.NewFromReader(configFile) - if err != nil { - log.Fatalln(err) - } - - s3Client, err := client.NewAwsS3Client(&s3Config) - if err != nil { - log.Fatalln(err) - } - - blobstoreClient := client.New(s3Client, &s3Config) - - nonFlagArgs := flag.Args() - if len(nonFlagArgs) < 2 { - log.Fatalf("Expected at least two arguments got %d\n", len(nonFlagArgs)) - } - - cmd := nonFlagArgs[0] - - switch cmd { - case "put": - if len(nonFlagArgs) != 3 { - log.Fatalf("Put method expected 3 arguments got %d\n", len(nonFlagArgs)) - } - src, dst := nonFlagArgs[1], nonFlagArgs[2] - - var sourceFile *os.File - sourceFile, err = os.Open(src) - if err != nil { - log.Fatalln(err) - } - - defer sourceFile.Close() //nolint:errcheck - err = blobstoreClient.Put(sourceFile, dst) - case "get": - if len(nonFlagArgs) != 3 { - log.Fatalf("Get method expected 3 arguments got %d\n", len(nonFlagArgs)) - } - src, dst := nonFlagArgs[1], nonFlagArgs[2] - - var dstFile *os.File - dstFile, err = os.Create(dst) - if err != nil { - log.Fatalln(err) - } - - defer dstFile.Close() //nolint:errcheck - err = blobstoreClient.Get(src, dstFile) - case "delete": - if len(nonFlagArgs) != 2 { - log.Fatalf("Delete method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - - err = 
blobstoreClient.Delete(nonFlagArgs[1]) - case "exists": - if len(nonFlagArgs) != 2 { - log.Fatalf("Exists method expected 2 arguments got %d\n", len(nonFlagArgs)) - } - - var exists bool - exists, err = blobstoreClient.Exists(nonFlagArgs[1]) - - // If the object exists the exit status is 0, otherwise it is 3 - // We are using `3` since `1` and `2` have special meanings - if err == nil && !exists { - os.Exit(3) - } - case "sign": - if len(nonFlagArgs) != 4 { - log.Fatalf("Sign method expects 3 arguments got %d\n", len(nonFlagArgs)-1) - } - - objectID, action := nonFlagArgs[1], nonFlagArgs[2] - - if action != "get" && action != "put" { - log.Fatalf("Action not implemented: %s. Available actions are 'get' and 'put'", action) - } - - expiration, err := time.ParseDuration(nonFlagArgs[3]) - if err != nil { - log.Fatalf("Expiration should be in the format of a duration i.e. 1h, 60m, 3600s. Got: %s", nonFlagArgs[3]) - } - - signedURL, err := blobstoreClient.Sign(objectID, action, expiration) - - if err != nil { - log.Fatalf("Failed to sign request: %s", err) - os.Exit(1) - } - - fmt.Print(signedURL) - os.Exit(0) - default: - log.Fatalf("unknown command: '%s'\n", cmd) - } - - if err != nil { - log.Fatalf("performing operation %s: %s\n", cmd, err) - } -} diff --git a/storage/commandexecuter.go b/storage/commandexecuter.go new file mode 100644 index 0000000..179a8e4 --- /dev/null +++ b/storage/commandexecuter.go @@ -0,0 +1,146 @@ +package storage + +import ( + "fmt" + "os" + "strings" + "time" +) + +type NotExistsError struct{} + +func (e *NotExistsError) Error() string { + return "object does not exist" +} + +type CommandExecuter struct { + str Storager +} + +func NewCommandExecuter(s Storager) *CommandExecuter { + return &CommandExecuter{str: s} +} + +func (sty *CommandExecuter) SetStorager(s Storager) { + sty.str = s +} + +func (sty *CommandExecuter) Execute(cmd string, nonFlagArgs []string) error { + + switch cmd { + case "put": + if len(nonFlagArgs) != 2 { + return 
fmt.Errorf("put method expected 2 arguments got %d", len(nonFlagArgs)) + } + sourceFilePath, dst := nonFlagArgs[0], nonFlagArgs[1] + + _, err := os.Stat(sourceFilePath) + if err != nil { + return fmt.Errorf("%w", err) + } + return sty.str.Put(sourceFilePath, dst) + + case "get": + if len(nonFlagArgs) != 2 { + return fmt.Errorf("get method expected 2 arguments got %d", len(nonFlagArgs)) + } + src, dst := nonFlagArgs[0], nonFlagArgs[1] + return sty.str.Get(src, dst) + + case "copy": + if len(nonFlagArgs) != 2 { + return fmt.Errorf("copy method expected 2 arguments got %d", len(nonFlagArgs)) + } + + srcBlob, dstBlob := nonFlagArgs[0], nonFlagArgs[1] + return sty.str.Copy(srcBlob, dstBlob) + + case "delete": + if len(nonFlagArgs) != 1 { + return fmt.Errorf("delete method expected 1 argument got %d", len(nonFlagArgs)) + } + return sty.str.Delete(nonFlagArgs[0]) + + case "delete-recursive": + var prefix string + if len(nonFlagArgs) > 1 { + return fmt.Errorf("delete-recursive takes at most 1 argument (prefix) got %d", len(nonFlagArgs)) + } + if len(nonFlagArgs) == 1 { + prefix = nonFlagArgs[0] + } + + return sty.str.DeleteRecursive(prefix) + + case "exists": + if len(nonFlagArgs) != 1 { + return fmt.Errorf("exists method expected 1 argument got %d", len(nonFlagArgs)) + } + + exists, err := sty.str.Exists(nonFlagArgs[0]) + if err == nil && !exists { + return &NotExistsError{} + } + if err != nil { + return fmt.Errorf("failed to check exist: %w", err) + } + + case "sign": + if len(nonFlagArgs) != 3 { + return fmt.Errorf("sign method expects 3 arguments got %d", len(nonFlagArgs)) + } + + objectID, action := nonFlagArgs[0], nonFlagArgs[1] + action = strings.ToLower(action) + if action != "get" && action != "put" { + return fmt.Errorf("action not implemented: %s. Available actions are 'get' and 'put'", action) + } + + expiration, err := time.ParseDuration(nonFlagArgs[2]) + if err != nil { + return fmt.Errorf("expiration should be in the format of a duration i.e. 
1h, 60m, 3600s. Got: %s", nonFlagArgs[2]) + } + + signedURL, err := sty.str.Sign(objectID, action, expiration) + if err != nil { + return fmt.Errorf("failed to sign request: %w", err) + } + fmt.Print(signedURL) + + case "list": + var prefix string + if len(nonFlagArgs) > 1 { + return fmt.Errorf("list method takes at most 1 argument (prefix) got %d", len(nonFlagArgs)) + } + if len(nonFlagArgs) == 1 { + prefix = nonFlagArgs[0] + } + + var objects []string + objects, err := sty.str.List(prefix) + if err != nil { + return fmt.Errorf("failed to list objects: %w", err) + } + + for _, object := range objects { + fmt.Println(object) + } + + case "properties": + if len(nonFlagArgs) != 1 { + return fmt.Errorf("properties method expected 1 argument got %d", len(nonFlagArgs)) + } + return sty.str.Properties(nonFlagArgs[0]) + + case "ensure-storage-exists": + if len(nonFlagArgs) != 0 { + return fmt.Errorf("ensureStorageExists method expected 0 argument got %d", len(nonFlagArgs)) + } + return sty.str.EnsureStorageExists() + + default: + return fmt.Errorf("unknown command: '%s'", cmd) + } + + return nil +} diff --git a/storage/commandexecuter_test.go b/storage/commandexecuter_test.go new file mode 100644 index 0000000..d08fb0f --- /dev/null +++ b/storage/commandexecuter_test.go @@ -0,0 +1,240 @@ +package storage + +import ( + "errors" + "fmt" + "os" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Execute Command", func() { + var sourceFileName = "some-source-file-command-executer" + var commandExecuter *CommandExecuter + var fakeStorager *FakeStorager + var tempFile *os.File + + BeforeEach(func() { + fakeStorager = &FakeStorager{} + commandExecuter = &CommandExecuter{str: fakeStorager} + }) + + Context("Put", func() { + It("Successfull", func() { + tempFile, _ = os.CreateTemp("", sourceFileName) //nolint:errcheck + tempFile.Close() //nolint:errcheck + DeferCleanup(func() { + os.Remove(tempFile.Name()) //nolint:errcheck + }) + err := commandExecuter.Execute("put", []string{tempFile.Name(), "destination"}) + Expect(fakeStorager.PutCallCount()).To(BeEquivalentTo(1)) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("No Source File", func() { + err := commandExecuter.Execute("put", []string{"source", "destination"}) + Expect(errors.Unwrap(err).Error()).To(ContainSubstring("no such file or directory")) + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("put", []string{"source"}) + Expect(err.Error()).To(ContainSubstring("put method expected 2 arguments got")) + }) + + }) + + Context("Get", func() { + It("Successfull", func() { + err := commandExecuter.Execute("get", []string{"source", "destination"}) + Expect(fakeStorager.GetCallCount()).To(BeEquivalentTo(1)) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("get", []string{"source"}) + Expect(err.Error()).To(ContainSubstring("get method expected 2 arguments got")) + }) + + }) + + Context("Copy", func() { + It("Successfull", func() { + err := commandExecuter.Execute("copy", []string{"source", "destination"}) + Expect(fakeStorager.CopyCallCount()).To(BeEquivalentTo(1)) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("copy", []string{"source"}) + 
Expect(err.Error()).To(ContainSubstring("copy method expected 2 arguments got")) + }) + + }) + + Context("Delete", func() { + It("Successfull", func() { + err := commandExecuter.Execute("delete", []string{"destination"}) + Expect(fakeStorager.DeleteCallCount()).To(BeEquivalentTo(1)) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("delete", []string{}) + Expect(err.Error()).To(ContainSubstring("delete method expected 1 argument got")) + }) + + }) + + Context("Delete-Recursive", func() { + It("Successfull", func() { + err := commandExecuter.Execute("delete-recursive", []string{}) + Expect(fakeStorager.DeleteRecursiveCallCount()).To(BeEquivalentTo(1)) + Expect(fakeStorager.deleteRecursiveArgsForCall[0].arg1).To(Equal("")) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Successfull With Prefix", func() { + err := commandExecuter.Execute("delete-recursive", []string{"prefix"}) + Expect(fakeStorager.DeleteRecursiveCallCount()).To(BeEquivalentTo(1)) + Expect(fakeStorager.deleteRecursiveArgsForCall[0].arg1).To(Equal("prefix")) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("delete-recursive", []string{"prefix", "extra-prefix"}) + Expect(err.Error()).To(ContainSubstring("delete-recursive takes at most 1 argument (prefix) got")) + }) + + }) + + Context("Exists", func() { + It("Successfull", func() { + fakeStorager.ExistsStub = func(file string) (bool, error) { + return true, nil + } + err := commandExecuter.Execute("exists", []string{"object"}) + + Expect(fakeStorager.ExistsCallCount()).To(BeEquivalentTo(1)) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Not found", func() { + fakeStorager.ExistsStub = func(file string) (bool, error) { + return false, nil + } + + err := commandExecuter.Execute("exists", []string{"object"}) + Expect(fakeStorager.ExistsCallCount()).To(BeEquivalentTo(1)) + 
Expect(err).To(BeAssignableToTypeOf(&NotExistsError{})) + + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("exists", []string{"object", "extra-object"}) + Expect(err.Error()).To(ContainSubstring("exists method expected 1 argument got")) + }) + + }) + + Context("Sign", func() { + It("Successfull", func() { + err := commandExecuter.Execute("sign", []string{"object", "put", "10s"}) + + Expect(fakeStorager.SignCallCount()).To(BeEquivalentTo(1)) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Wrong action", func() { + err := commandExecuter.Execute("sign", []string{"object", "delete", "10s"}) + Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("action not implemented: %s. Available actions are 'get' and 'put'", "delete"))) + + }) + + It("Wrong time format", func() { + err := commandExecuter.Execute("sign", []string{"object", "put", "10"}) + Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("expiration should be in the format of a duration i.e. 1h, 60m, 3600s. 
Got: %s", "10"))) + + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("sign", []string{"object", "put"}) + Expect(err.Error()).To(ContainSubstring("sign method expects 3 arguments got")) + + }) + + }) + + Context("List", func() { + It("Successfull", func() { + err := commandExecuter.Execute("list", []string{}) + + Expect(fakeStorager.ListCallCount()).To(BeEquivalentTo(1)) + Expect(fakeStorager.listArgsForCall[0].arg1).To(Equal("")) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("With Prefix", func() { + err := commandExecuter.Execute("list", []string{"prefix"}) + Expect(fakeStorager.ListCallCount()).To(BeEquivalentTo(1)) + Expect(fakeStorager.listArgsForCall[0].arg1).To(Equal("prefix")) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("list", []string{"prefix-1", "prefix-2"}) + Expect(err.Error()).To(ContainSubstring("list method takes at most 1 argument (prefix) got")) + }) + + }) + + Context("Properties", func() { + It("Successfull", func() { + err := commandExecuter.Execute("properties", []string{"object"}) + Expect(fakeStorager.PropertiesCallCount()).To(BeEquivalentTo(1)) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("properties", []string{}) + Expect(err.Error()).To(ContainSubstring("properties method expected 1 argument got")) + }) + + }) + + Context("Ensure storage exists", func() { + It("Successfull", func() { + err := commandExecuter.Execute("ensure-storage-exists", []string{}) + Expect(fakeStorager.EnsureStorageExistsCallCount()).To(BeEquivalentTo(1)) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Wrong number of parameters", func() { + err := commandExecuter.Execute("ensure-storage-exists", []string{"extra-parameter"}) + Expect(err.Error()).To(ContainSubstring("ensureStorageExists method expected 0 argument got")) + }) + + }) + + 
Context("Unsupported command", func() { + It("Successfull", func() { + err := commandExecuter.Execute("unsupported-command", []string{}) + Expect(err.Error()).To(ContainSubstring("unknown command: '%s'", "unsupported-command")) + + }) + + }) + +}) diff --git a/storage/factory.go b/storage/factory.go new file mode 100644 index 0000000..a64634d --- /dev/null +++ b/storage/factory.go @@ -0,0 +1,118 @@ +package storage + +import ( + "context" + "fmt" + "os" + + boshlog "github.com/cloudfoundry/bosh-utils/logger" + alioss "github.com/cloudfoundry/storage-cli/alioss/client" + aliossconfig "github.com/cloudfoundry/storage-cli/alioss/config" + azurebs "github.com/cloudfoundry/storage-cli/azurebs/client" + azureconfigbs "github.com/cloudfoundry/storage-cli/azurebs/config" + davapp "github.com/cloudfoundry/storage-cli/dav/app" + davcmd "github.com/cloudfoundry/storage-cli/dav/cmd" + davconfig "github.com/cloudfoundry/storage-cli/dav/config" + gcs "github.com/cloudfoundry/storage-cli/gcs/client" + gcsconfig "github.com/cloudfoundry/storage-cli/gcs/config" + s3 "github.com/cloudfoundry/storage-cli/s3/client" + s3config "github.com/cloudfoundry/storage-cli/s3/config" +) + +var newAzurebsClient = func(configFile *os.File) (Storager, error) { + conf, err := azureconfigbs.NewFromReader(configFile) + if err != nil { + return nil, err + } + + sc, err := azurebs.NewStorageClient(conf) + if err != nil { + return nil, err + } + + azClient, err := azurebs.New(sc) + if err != nil { + return nil, err + } + return &azClient, nil +} + +var newAliossClient = func(configFile *os.File) (Storager, error) { + aliConfig, err := aliossconfig.NewFromReader(configFile) + if err != nil { + return nil, err + } + + storageClient, err := alioss.NewStorageClient(aliConfig) + if err != nil { + return nil, err + } + + aliClient, err := alioss.New(storageClient) + if err != nil { + return nil, err + } + + return &aliClient, nil +} + +var newGcsClient = func(configFile *os.File) (Storager, error) { + 
gcsConfig, err := gcsconfig.NewFromReader(configFile) + if err != nil { + return nil, err + } + + ctx := context.Background() + gcsClient, err := gcs.New(ctx, &gcsConfig) + if err != nil { + return nil, err + } + return gcsClient, nil + +} + +var newS3Client = func(configFile *os.File) (Storager, error) { + s3Config, err := s3config.NewFromReader(configFile) + if err != nil { + return nil, err + } + + s3Client, err := s3.NewAwsS3Client(&s3Config) + if err != nil { + return nil, err + } + + return s3.New(s3Client, &s3Config), nil + +} + +var newDavClient = func(configFile *os.File) (Storager, error) { + davConfig, err := davconfig.NewFromReader(configFile) + if err != nil { + return nil, err + } + + logger := boshlog.NewLogger(boshlog.LevelNone) + cmdFactory := davcmd.NewFactory(logger) + + cmdRunner := davcmd.NewRunner(cmdFactory) + + return davapp.New(cmdRunner, davConfig), nil +} + +func NewStorageClient(storageType string, configFile *os.File) (Storager, error) { + switch storageType { + case "azurebs": + return newAzurebsClient(configFile) + case "alioss": + return newAliossClient(configFile) + case "s3": + return newS3Client(configFile) + case "gcs": + return newGcsClient(configFile) + case "dav": + return newDavClient(configFile) + default: + return nil, fmt.Errorf("storage %s not implemented", storageType) + } +} diff --git a/storage/factory_test.go b/storage/factory_test.go new file mode 100644 index 0000000..11ab415 --- /dev/null +++ b/storage/factory_test.go @@ -0,0 +1,129 @@ +package storage + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Factory", func() { + Describe("New", func() { + + var configFile *os.File + BeforeEach(func() { + configFile, _ = os.CreateTemp("", "some-config-file") //nolint:errcheck + + }) + AfterEach(func() { + configFile.Close() //nolint:errcheck + os.Remove(configFile.Name()) //nolint:errcheck + }) + + Context("alioss", func() { + It("Create a client", func() { + original := newAliossClient + DeferCleanup(func() { + newAliossClient = original + }) + + mockClient := &FakeStorager{} + newAliossClient = func(configFile *os.File) (Storager, error) { + return mockClient, nil + } + + client, err := NewStorageClient("alioss", configFile) + Expect(client).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(client).To(Equal(mockClient)) + }) + + }) + + Context("azurebs", func() { + It("Create a client", func() { + original := newAzurebsClient + DeferCleanup(func() { + newAzurebsClient = original + }) + + mockClient := &FakeStorager{} + newAzurebsClient = func(configFile *os.File) (Storager, error) { + return mockClient, nil + } + + client, err := NewStorageClient("azurebs", configFile) + Expect(client).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(client).To(Equal(mockClient)) + }) + + }) + + Context("dav", func() { + It("Create a client", func() { + original := newDavClient + DeferCleanup(func() { + newDavClient = original + }) + + mockClient := &FakeStorager{} + newDavClient = func(configFile *os.File) (Storager, error) { + return mockClient, nil + } + + client, err := NewStorageClient("dav", configFile) + Expect(client).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(client).To(Equal(mockClient)) + }) + + }) + + Context("gcs", func() { + It("Create a client", func() { + original := newGcsClient + DeferCleanup(func() { + newGcsClient = original + }) + + mockClient := &FakeStorager{} + newGcsClient = func(configFile *os.File) (Storager, error) { + return mockClient, nil + } + + client, err := 
NewStorageClient("gcs", configFile) + Expect(client).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(client).To(Equal(mockClient)) + }) + + }) + + Context("s3", func() { + It("Create a client", func() { + original := newS3Client + DeferCleanup(func() { + newS3Client = original + }) + + mockClient := &FakeStorager{} + newS3Client = func(configFile *os.File) (Storager, error) { + return mockClient, nil + } + + client, err := NewStorageClient("s3", configFile) + Expect(client).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(client).To(Equal(mockClient)) + }) + + }) + + It("Unimplemented Client", func() { + client, err := NewStorageClient("random-client", configFile) + Expect(err).To(HaveOccurred()) + Expect(client).To(BeNil()) + }) + }) +}) diff --git a/storage/fake_storager_test.go b/storage/fake_storager_test.go new file mode 100644 index 0000000..c4f8848 --- /dev/null +++ b/storage/fake_storager_test.go @@ -0,0 +1,772 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package storage + +import ( + "sync" + "time" +) + +type FakeStorager struct { + CopyStub func(string, string) error + copyMutex sync.RWMutex + copyArgsForCall []struct { + arg1 string + arg2 string + } + copyReturns struct { + result1 error + } + copyReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(string) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 string + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + DeleteRecursiveStub func(string) error + deleteRecursiveMutex sync.RWMutex + deleteRecursiveArgsForCall []struct { + arg1 string + } + deleteRecursiveReturns struct { + result1 error + } + deleteRecursiveReturnsOnCall map[int]struct { + result1 error + } + EnsureStorageExistsStub func() error + ensureStorageExistsMutex sync.RWMutex + ensureStorageExistsArgsForCall []struct { + } + ensureStorageExistsReturns struct { + result1 error + } + ensureStorageExistsReturnsOnCall map[int]struct { + result1 error + } + ExistsStub func(string) (bool, error) + existsMutex sync.RWMutex + existsArgsForCall []struct { + arg1 string + } + existsReturns struct { + result1 bool + result2 error + } + existsReturnsOnCall map[int]struct { + result1 bool + result2 error + } + GetStub func(string, string) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 string + arg2 string + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + result1 error + } + ListStub func(string) ([]string, error) + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 string + } + listReturns struct { + result1 []string + result2 error + } + listReturnsOnCall map[int]struct { + result1 []string + result2 error + } + PropertiesStub func(string) error + propertiesMutex sync.RWMutex + propertiesArgsForCall []struct { + arg1 string + } + propertiesReturns struct { + result1 error + } + propertiesReturnsOnCall map[int]struct { + result1 error + } + PutStub 
func(string, string) error + putMutex sync.RWMutex + putArgsForCall []struct { + arg1 string + arg2 string + } + putReturns struct { + result1 error + } + putReturnsOnCall map[int]struct { + result1 error + } + SignStub func(string, string, time.Duration) (string, error) + signMutex sync.RWMutex + signArgsForCall []struct { + arg1 string + arg2 string + arg3 time.Duration + } + signReturns struct { + result1 string + result2 error + } + signReturnsOnCall map[int]struct { + result1 string + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeStorager) Copy(arg1 string, arg2 string) error { + fake.copyMutex.Lock() + ret, specificReturn := fake.copyReturnsOnCall[len(fake.copyArgsForCall)] + fake.copyArgsForCall = append(fake.copyArgsForCall, struct { + arg1 string + arg2 string + }{arg1, arg2}) + stub := fake.CopyStub + fakeReturns := fake.copyReturns + fake.recordInvocation("Copy", []interface{}{arg1, arg2}) + fake.copyMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorager) CopyCallCount() int { + fake.copyMutex.RLock() + defer fake.copyMutex.RUnlock() + return len(fake.copyArgsForCall) +} + +func (fake *FakeStorager) CopyCalls(stub func(string, string) error) { + fake.copyMutex.Lock() + defer fake.copyMutex.Unlock() + fake.CopyStub = stub +} + +func (fake *FakeStorager) CopyArgsForCall(i int) (string, string) { + fake.copyMutex.RLock() + defer fake.copyMutex.RUnlock() + argsForCall := fake.copyArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeStorager) CopyReturns(result1 error) { + fake.copyMutex.Lock() + defer fake.copyMutex.Unlock() + fake.CopyStub = nil + fake.copyReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) CopyReturnsOnCall(i int, result1 error) { + fake.copyMutex.Lock() + defer fake.copyMutex.Unlock() + fake.CopyStub = 
nil + if fake.copyReturnsOnCall == nil { + fake.copyReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.copyReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) Delete(arg1 string) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorager) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *FakeStorager) DeleteCalls(stub func(string) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *FakeStorager) DeleteArgsForCall(i int) string { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeStorager) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) DeleteRecursive(arg1 string) error { + fake.deleteRecursiveMutex.Lock() + ret, specificReturn := fake.deleteRecursiveReturnsOnCall[len(fake.deleteRecursiveArgsForCall)] + fake.deleteRecursiveArgsForCall 
= append(fake.deleteRecursiveArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.DeleteRecursiveStub + fakeReturns := fake.deleteRecursiveReturns + fake.recordInvocation("DeleteRecursive", []interface{}{arg1}) + fake.deleteRecursiveMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorager) DeleteRecursiveCallCount() int { + fake.deleteRecursiveMutex.RLock() + defer fake.deleteRecursiveMutex.RUnlock() + return len(fake.deleteRecursiveArgsForCall) +} + +func (fake *FakeStorager) DeleteRecursiveCalls(stub func(string) error) { + fake.deleteRecursiveMutex.Lock() + defer fake.deleteRecursiveMutex.Unlock() + fake.DeleteRecursiveStub = stub +} + +func (fake *FakeStorager) DeleteRecursiveArgsForCall(i int) string { + fake.deleteRecursiveMutex.RLock() + defer fake.deleteRecursiveMutex.RUnlock() + argsForCall := fake.deleteRecursiveArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeStorager) DeleteRecursiveReturns(result1 error) { + fake.deleteRecursiveMutex.Lock() + defer fake.deleteRecursiveMutex.Unlock() + fake.DeleteRecursiveStub = nil + fake.deleteRecursiveReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) DeleteRecursiveReturnsOnCall(i int, result1 error) { + fake.deleteRecursiveMutex.Lock() + defer fake.deleteRecursiveMutex.Unlock() + fake.DeleteRecursiveStub = nil + if fake.deleteRecursiveReturnsOnCall == nil { + fake.deleteRecursiveReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteRecursiveReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) EnsureStorageExists() error { + fake.ensureStorageExistsMutex.Lock() + ret, specificReturn := fake.ensureStorageExistsReturnsOnCall[len(fake.ensureStorageExistsArgsForCall)] + fake.ensureStorageExistsArgsForCall = append(fake.ensureStorageExistsArgsForCall, struct { + }{}) + stub := fake.EnsureStorageExistsStub + 
fakeReturns := fake.ensureStorageExistsReturns + fake.recordInvocation("EnsureStorageExists", []interface{}{}) + fake.ensureStorageExistsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorager) EnsureStorageExistsCallCount() int { + fake.ensureStorageExistsMutex.RLock() + defer fake.ensureStorageExistsMutex.RUnlock() + return len(fake.ensureStorageExistsArgsForCall) +} + +func (fake *FakeStorager) EnsureStorageExistsCalls(stub func() error) { + fake.ensureStorageExistsMutex.Lock() + defer fake.ensureStorageExistsMutex.Unlock() + fake.EnsureStorageExistsStub = stub +} + +func (fake *FakeStorager) EnsureStorageExistsReturns(result1 error) { + fake.ensureStorageExistsMutex.Lock() + defer fake.ensureStorageExistsMutex.Unlock() + fake.EnsureStorageExistsStub = nil + fake.ensureStorageExistsReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) EnsureStorageExistsReturnsOnCall(i int, result1 error) { + fake.ensureStorageExistsMutex.Lock() + defer fake.ensureStorageExistsMutex.Unlock() + fake.EnsureStorageExistsStub = nil + if fake.ensureStorageExistsReturnsOnCall == nil { + fake.ensureStorageExistsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.ensureStorageExistsReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) Exists(arg1 string) (bool, error) { + fake.existsMutex.Lock() + ret, specificReturn := fake.existsReturnsOnCall[len(fake.existsArgsForCall)] + fake.existsArgsForCall = append(fake.existsArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.ExistsStub + fakeReturns := fake.existsReturns + fake.recordInvocation("Exists", []interface{}{arg1}) + fake.existsMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeStorager) ExistsCallCount() 
int { + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + return len(fake.existsArgsForCall) +} + +func (fake *FakeStorager) ExistsCalls(stub func(string) (bool, error)) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = stub +} + +func (fake *FakeStorager) ExistsArgsForCall(i int) string { + fake.existsMutex.RLock() + defer fake.existsMutex.RUnlock() + argsForCall := fake.existsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeStorager) ExistsReturns(result1 bool, result2 error) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = nil + fake.existsReturns = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeStorager) ExistsReturnsOnCall(i int, result1 bool, result2 error) { + fake.existsMutex.Lock() + defer fake.existsMutex.Unlock() + fake.ExistsStub = nil + if fake.existsReturnsOnCall == nil { + fake.existsReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.existsReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeStorager) Get(arg1 string, arg2 string) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 string + arg2 string + }{arg1, arg2}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1, arg2}) + fake.getMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorager) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *FakeStorager) GetCalls(stub func(string, string) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *FakeStorager) GetArgsForCall(i int) 
(string, string) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeStorager) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) List(arg1 string) ([]string, error) { + fake.listMutex.Lock() + ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeStorager) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *FakeStorager) ListCalls(stub func(string) ([]string, error)) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *FakeStorager) ListArgsForCall(i int) string { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeStorager) ListReturns(result1 []string, result2 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 []string + result2 error + }{result1, result2} +} + +func (fake *FakeStorager) 
ListReturnsOnCall(i int, result1 []string, result2 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 []string + result2 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 []string + result2 error + }{result1, result2} +} + +func (fake *FakeStorager) Properties(arg1 string) error { + fake.propertiesMutex.Lock() + ret, specificReturn := fake.propertiesReturnsOnCall[len(fake.propertiesArgsForCall)] + fake.propertiesArgsForCall = append(fake.propertiesArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.PropertiesStub + fakeReturns := fake.propertiesReturns + fake.recordInvocation("Properties", []interface{}{arg1}) + fake.propertiesMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorager) PropertiesCallCount() int { + fake.propertiesMutex.RLock() + defer fake.propertiesMutex.RUnlock() + return len(fake.propertiesArgsForCall) +} + +func (fake *FakeStorager) PropertiesCalls(stub func(string) error) { + fake.propertiesMutex.Lock() + defer fake.propertiesMutex.Unlock() + fake.PropertiesStub = stub +} + +func (fake *FakeStorager) PropertiesArgsForCall(i int) string { + fake.propertiesMutex.RLock() + defer fake.propertiesMutex.RUnlock() + argsForCall := fake.propertiesArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeStorager) PropertiesReturns(result1 error) { + fake.propertiesMutex.Lock() + defer fake.propertiesMutex.Unlock() + fake.PropertiesStub = nil + fake.propertiesReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) PropertiesReturnsOnCall(i int, result1 error) { + fake.propertiesMutex.Lock() + defer fake.propertiesMutex.Unlock() + fake.PropertiesStub = nil + if fake.propertiesReturnsOnCall == nil { + fake.propertiesReturnsOnCall = make(map[int]struct { + result1 error 
+ }) + } + fake.propertiesReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) Put(arg1 string, arg2 string) error { + fake.putMutex.Lock() + ret, specificReturn := fake.putReturnsOnCall[len(fake.putArgsForCall)] + fake.putArgsForCall = append(fake.putArgsForCall, struct { + arg1 string + arg2 string + }{arg1, arg2}) + stub := fake.PutStub + fakeReturns := fake.putReturns + fake.recordInvocation("Put", []interface{}{arg1, arg2}) + fake.putMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeStorager) PutCallCount() int { + fake.putMutex.RLock() + defer fake.putMutex.RUnlock() + return len(fake.putArgsForCall) +} + +func (fake *FakeStorager) PutCalls(stub func(string, string) error) { + fake.putMutex.Lock() + defer fake.putMutex.Unlock() + fake.PutStub = stub +} + +func (fake *FakeStorager) PutArgsForCall(i int) (string, string) { + fake.putMutex.RLock() + defer fake.putMutex.RUnlock() + argsForCall := fake.putArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeStorager) PutReturns(result1 error) { + fake.putMutex.Lock() + defer fake.putMutex.Unlock() + fake.PutStub = nil + fake.putReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) PutReturnsOnCall(i int, result1 error) { + fake.putMutex.Lock() + defer fake.putMutex.Unlock() + fake.PutStub = nil + if fake.putReturnsOnCall == nil { + fake.putReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.putReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeStorager) Sign(arg1 string, arg2 string, arg3 time.Duration) (string, error) { + fake.signMutex.Lock() + ret, specificReturn := fake.signReturnsOnCall[len(fake.signArgsForCall)] + fake.signArgsForCall = append(fake.signArgsForCall, struct { + arg1 string + arg2 string + arg3 time.Duration + }{arg1, arg2, arg3}) + stub := 
fake.SignStub + fakeReturns := fake.signReturns + fake.recordInvocation("Sign", []interface{}{arg1, arg2, arg3}) + fake.signMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeStorager) SignCallCount() int { + fake.signMutex.RLock() + defer fake.signMutex.RUnlock() + return len(fake.signArgsForCall) +} + +func (fake *FakeStorager) SignCalls(stub func(string, string, time.Duration) (string, error)) { + fake.signMutex.Lock() + defer fake.signMutex.Unlock() + fake.SignStub = stub +} + +func (fake *FakeStorager) SignArgsForCall(i int) (string, string, time.Duration) { + fake.signMutex.RLock() + defer fake.signMutex.RUnlock() + argsForCall := fake.signArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeStorager) SignReturns(result1 string, result2 error) { + fake.signMutex.Lock() + defer fake.signMutex.Unlock() + fake.SignStub = nil + fake.signReturns = struct { + result1 string + result2 error + }{result1, result2} +} + +func (fake *FakeStorager) SignReturnsOnCall(i int, result1 string, result2 error) { + fake.signMutex.Lock() + defer fake.signMutex.Unlock() + fake.SignStub = nil + if fake.signReturnsOnCall == nil { + fake.signReturnsOnCall = make(map[int]struct { + result1 string + result2 error + }) + } + fake.signReturnsOnCall[i] = struct { + result1 string + result2 error + }{result1, result2} +} + +func (fake *FakeStorager) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeStorager) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == 
nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ Storager = new(FakeStorager) diff --git a/storage/storage_suite_test.go b/storage/storage_suite_test.go new file mode 100644 index 0000000..b9d5eee --- /dev/null +++ b/storage/storage_suite_test.go @@ -0,0 +1,13 @@ +package storage + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestStorage(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Storage Suite") +} diff --git a/storage/storager.go b/storage/storager.go new file mode 100644 index 0000000..baa9b3f --- /dev/null +++ b/storage/storager.go @@ -0,0 +1,18 @@ +package storage + +import ( + "time" +) + +type Storager interface { + Put(sourceFilePath string, dest string) error + Get(source string, dest string) error + Delete(dest string) error + DeleteRecursive(prefix string) error + Exists(dest string) (bool, error) + Sign(dest string, action string, expiration time.Duration) (string, error) + List(prefix string) ([]string, error) + Copy(srcBlob string, dstBlob string) error + Properties(dest string) error + EnsureStorageExists() error +} diff --git a/tool.go b/tool.go index e6fa2b8..c4b77c9 100644 --- a/tool.go +++ b/tool.go @@ -1,5 +1,4 @@ //go:build tools -// +build tools package tools diff --git a/tools/tools.go b/tools/tools.go index f206e8f..72fad8b 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -1,5 +1,4 @@ //go:build tools -// +build tools package tools